From c6ec514304b73480297b68c0d0da8f105df8c926 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Thu, 21 Nov 2024 09:08:39 +0000 Subject: [PATCH 01/38] wip: do not build ITransactionFileTracker directly into the chunks, find it on the workitem instead (so that it can vary depending on the user) (not actually setting it in the workitem yet) --- .../SwitchChunkFailureTests.cs | 3 +- .../LogReplicationWithExistingDbFixture.cs | 3 +- ...tfchunkreader_existsat_optimizer_should.cs | 3 +- .../Scavenging/scavenged_chunk.cs | 3 +- .../TransactionLog/TFChunkHelper.cs | 2 +- .../Validation/when_validating_tfchunk_db.cs | 2 +- .../when_opening_existing_tfchunk.cs | 2 +- ..._opening_tfchunk_from_non_existing_file.cs | 2 +- .../when_reading_from_a_cached_tfchunk.cs | 2 +- .../when_uncaching_a_tfchunk.cs | 2 +- .../Scavenge/Infrastructure/Scenario.cs | 3 +- src/EventStore.Core/ClusterVNode.cs | 2 +- src/EventStore.Core/MetricsBootstrapper.cs | 2 +- .../Services/RedactionService.cs | 3 +- .../Chunks/TFChunk/ReaderWorkItem.cs | 5 +++ .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 35 +++++++++---------- .../Chunks/TFChunk/TFChunkReadSide.cs | 14 ++++---- .../TransactionLog/Chunks/TFChunkDb.cs | 19 ++++------ .../TransactionLog/Chunks/TFChunkManager.cs | 15 +++----- .../TransactionLog/Chunks/TFChunkScavenger.cs | 6 ++-- .../Chunks/TransactionFileTracker.cs | 5 --- .../TransactionLog/ITransactionFileTracker.cs | 7 ++++ .../DbAccess/ChunkWriterForExecutor.cs | 3 +- 23 files changed, 63 insertions(+), 80 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs b/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs index 3b8e2b06e3d..77c8ff4b8f7 100644 --- a/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs +++ b/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs @@ -131,8 +131,7 @@ public async Task cannot_switch_with_chunk_having_mismatched_range() { 
newChunk = $"{nameof(cannot_switch_with_chunk_having_mismatched_range)}-chunk-0-2.tmp"; var chunkHeader = new ChunkHeader(1, 1024, 0, 2, true, Guid.NewGuid()); - var chunk = TFChunk.CreateWithHeader(Path.Combine(PathName, newChunk), chunkHeader, 1024, false, false, false, 1, 1, false, - new TFChunkTracker.NoOp()); + var chunk = TFChunk.CreateWithHeader(Path.Combine(PathName, newChunk), chunkHeader, 1024, false, false, false, 1, 1, false); chunk.Dispose(); msg = await SwitchChunk(GetChunk(0, 0), newChunk); Assert.AreEqual(SwitchChunkResult.ChunkRangeDoesNotMatch, msg.Result); diff --git a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs index 10fe3d58eee..93e8ba66161 100644 --- a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs +++ b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs @@ -46,8 +46,7 @@ protected static Task CreateChunk(TFChunkDb db, bool raw, bool complete, int chu writethrough: db.Config.WriteThrough, initialReaderCount: db.Config.InitialReaderCount, maxReaderCount: db.Config.MaxReaderCount, - reduceFileCachePressure: db.Config.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: db.Config.ReduceFileCachePressure); var posMaps = new List(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs index a341e145ad3..38b79fef7bf 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs @@ -91,8 +91,7 @@ private TFChunk CreateChunk(int chunkNumber, bool 
scavenged, out List po chunkNumber, chunkNumber, scavenged, false, false, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - false, - new TFChunkTracker.NoOp()); + false); long offset = chunkNumber * 1024 * 1024; long logPos = 0 + offset; for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) { diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs index d08eb03519b..666933c537a 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs @@ -14,8 +14,7 @@ public void is_fully_resident_in_memory_when_cached() { var chunk = TFChunk.CreateNew(Filename, 1024 * 1024, 0, 0, true, false, false, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - false, - new TFChunkTracker.NoOp()); + false); long logPos = 0; for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) { map.Add(new PosMap(logPos, (int)logPos)); diff --git a/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs b/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs index a283a8e2dca..275c7859ff5 100644 --- a/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs +++ b/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs @@ -72,7 +72,7 @@ public static TFChunkDbConfig CreateDbConfig( public static TFChunk CreateNewChunk(string fileName, int chunkSize = 4096, bool isScavenged = false) { return TFChunk.CreateNew(fileName, chunkSize, 0, 0, isScavenged: isScavenged, inMem: false, unbuffered: false, - writethrough: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); + writethrough: false, initialReaderCount: 
Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs b/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs index 7d953814feb..5c7c0104a91 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs @@ -446,7 +446,7 @@ public void when_prelast_chunk_corrupted_throw_hash_validation_exception() { .WriteTo.Sink(sink) .MinimumLevel.Verbose() .CreateLogger()) - using (var db = new TFChunkDb(config, new TFChunkTracker.NoOp(), log)) { + using (var db = new TFChunkDb(config, log)) { byte[] contents = new byte[config.ChunkSize]; for (var i = 0; i < config.ChunkSize; i++) { contents[i] = 0; diff --git a/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs index feaa6c0c0f3..5c48d023dfb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs @@ -18,7 +18,7 @@ public override void TestFixtureSetUp() { _testChunk = TFChunk.FromCompletedFile(Filename, true, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: false); } [TearDown] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs index 4604e10e2bb..3404a706bb5 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs +++ 
b/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs @@ -9,7 +9,7 @@ public class when_opening_tfchunk_from_non_existing_file : SpecificationWithFile [Test] public void it_should_throw_a_file_not_found_exception() { Assert.Throws(() => TFChunk.FromCompletedFile(Filename, verifyHash: true, - unbufferedRead: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp())); + unbufferedRead: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false)); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs index a7164447673..749488a4a6c 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs @@ -30,7 +30,7 @@ public override void TestFixtureSetUp() { _chunk.Flush(); _chunk.Complete(); _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, - initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); + initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); _cachedChunk.CacheInMemory(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs index 24d8ed146bd..0595ac9d8df 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs +++ 
b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs @@ -30,7 +30,7 @@ public override void TestFixtureSetUp() { _chunk.Flush(); _chunk.Complete(); _uncachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, - initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); + initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); _uncachedChunk.CacheInMemory(); _uncachedChunk.UnCacheFromMemory(); } diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index a82db9413d6..59a1f09df05 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -713,8 +713,7 @@ private void EmptyRequestedChunks(TFChunkDb db) { writethrough: false, initialReaderCount: 1, maxReaderCount: 1, - reduceFileCachePressure: false, - new TFChunkTracker.NoOp()); + reduceFileCachePressure: false); newChunk.CompleteScavenge(null); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 5eca55bb81a..49fb2e004e3 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -306,7 +306,7 @@ public ClusterVNode(ClusterVNodeOptions options, metricsConfiguration ??= new(); MetricsBootstrapper.Bootstrap(metricsConfiguration, dbConfig, trackers); - Db = new TFChunkDb(dbConfig, tracker: trackers.TransactionFileTracker); + Db = new TFChunkDb(dbConfig); TFChunkDbConfig CreateDbConfig( out SystemStatsHelper statsHelper, diff --git a/src/EventStore.Core/MetricsBootstrapper.cs b/src/EventStore.Core/MetricsBootstrapper.cs index cf8efe7dc5c..37e77f765fe 100644 --- 
a/src/EventStore.Core/MetricsBootstrapper.cs +++ b/src/EventStore.Core/MetricsBootstrapper.cs @@ -25,7 +25,7 @@ public class Trackers { public GrpcTrackers GrpcTrackers { get; } = new(); public QueueTrackers QueueTrackers { get; set; } = new(); public GossipTrackers GossipTrackers { get; set; } = new (); - public ITransactionFileTracker TransactionFileTracker { get; set; } = new TFChunkTracker.NoOp(); + public ITransactionFileTracker TransactionFileTracker { get; set; } = ITransactionFileTracker.NoOp; //qq see where this is needed now public IIndexTracker IndexTracker { get; set; } = new IndexTracker.NoOp(); public IMaxTracker WriterFlushSizeTracker { get; set; } = new MaxTracker.NoOp(); public IDurationMaxTracker WriterFlushDurationTracker { get; set; } = new DurationMaxTracker.NoOp(); diff --git a/src/EventStore.Core/Services/RedactionService.cs b/src/EventStore.Core/Services/RedactionService.cs index 67863964107..ca32f95e313 100644 --- a/src/EventStore.Core/Services/RedactionService.cs +++ b/src/EventStore.Core/Services/RedactionService.cs @@ -250,8 +250,7 @@ private bool IsValidSwitchChunkRequest(string targetChunkFile, string newChunkFi initialReaderCount: 1, maxReaderCount: 1, optimizeReadSideCache: false, - reduceFileCachePressure: true, - tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: true); } catch (HashValidationException) { failReason = SwitchChunkResult.NewChunkHashInvalid; return false; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs index f1db909f15e..06908348ad6 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs @@ -1,6 +1,9 @@ +#nullable enable + using System.IO; namespace EventStore.Core.TransactionLog.Chunks.TFChunk { + // ReaderWorkItems are checked out of a pool and used by one thread at a time internal class ReaderWorkItem { 
public readonly Stream Stream; public readonly BinaryReader Reader; @@ -11,5 +14,7 @@ public ReaderWorkItem(Stream stream, BinaryReader reader, bool isMemory) { Reader = reader; IsMemory = isMemory; } + + public ITransactionFileTracker Tracker { get; private set; } = ITransactionFileTracker.NoOp; } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 60bbe9e74c1..2505aea67cc 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -165,11 +165,11 @@ private TFChunk(string filename, } public static TFChunk FromCompletedFile(string filename, bool verifyHash, bool unbufferedRead, - int initialReaderCount, int maxReaderCount, ITransactionFileTracker tracker, bool optimizeReadSideCache = false, bool reduceFileCachePressure = false) { + int initialReaderCount, int maxReaderCount, bool optimizeReadSideCache = false, bool reduceFileCachePressure = false) { var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, TFConsts.MidpointsDepth, false, unbufferedRead, false, reduceFileCachePressure); try { - chunk.InitCompleted(verifyHash, optimizeReadSideCache, tracker); + chunk.InitCompleted(verifyHash, optimizeReadSideCache); } catch { chunk.Dispose(); throw; @@ -179,7 +179,7 @@ public static TFChunk FromCompletedFile(string filename, bool verifyHash, bool u } public static TFChunk FromOngoingFile(string filename, int writePosition, bool checkSize, bool unbuffered, - bool writethrough, int initialReaderCount, int maxReaderCount, bool reduceFileCachePressure, ITransactionFileTracker tracker) { + bool writethrough, int initialReaderCount, int maxReaderCount, bool reduceFileCachePressure) { var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, @@ -188,7 +188,7 @@ public static TFChunk FromOngoingFile(string filename, int writePosition, bool c unbuffered, writethrough, 
reduceFileCachePressure); try { - chunk.InitOngoing(writePosition, checkSize, tracker); + chunk.InitOngoing(writePosition, checkSize); } catch { chunk.Dispose(); throw; @@ -207,13 +207,13 @@ public static TFChunk CreateNew(string filename, bool writethrough, int initialReaderCount, int maxReaderCount, - bool reduceFileCachePressure, - ITransactionFileTracker tracker) { + bool reduceFileCachePressure) { + var size = GetAlignedSize(chunkSize + ChunkHeader.Size + ChunkFooter.Size); var chunkHeader = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkStartNumber, chunkEndNumber, isScavenged, Guid.NewGuid()); return CreateWithHeader(filename, chunkHeader, size, inMem, unbuffered, writethrough, initialReaderCount, maxReaderCount, - reduceFileCachePressure, tracker); + reduceFileCachePressure); } public static TFChunk CreateWithHeader(string filename, @@ -224,8 +224,7 @@ public static TFChunk CreateWithHeader(string filename, bool writethrough, int initialReaderCount, int maxReaderCount, - bool reduceFileCachePressure, - ITransactionFileTracker tracker) { + bool reduceFileCachePressure) { var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, @@ -235,7 +234,7 @@ public static TFChunk CreateWithHeader(string filename, writethrough, reduceFileCachePressure); try { - chunk.InitNew(header, fileSize, tracker); + chunk.InitNew(header, fileSize); } catch { chunk.Dispose(); throw; @@ -244,7 +243,7 @@ public static TFChunk CreateWithHeader(string filename, return chunk; } - private void InitCompleted(bool verifyHash, bool optimizeReadSideCache, ITransactionFileTracker tracker) { + private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) { var fileInfo = new FileInfo(_filename); if (!fileInfo.Exists) throw new CorruptDatabaseException(new ChunkNotFoundException(_filename)); @@ -291,8 +290,8 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache, ITransac } _readSide = _chunkHeader.IsScavenged - ? 
(IChunkReadSide)new TFChunkReadSideScavenged(this, optimizeReadSideCache, tracker) - : new TFChunkReadSideUnscavenged(this, tracker); + ? (IChunkReadSide)new TFChunkReadSideScavenged(this, optimizeReadSideCache) + : new TFChunkReadSideUnscavenged(this); // do not actually cache now because it is too slow when opening the database _readSide.RequestCaching(); @@ -301,7 +300,7 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache, ITransac VerifyFileHash(); } - private void InitNew(ChunkHeader chunkHeader, int fileSize, ITransactionFileTracker tracker) { + private void InitNew(ChunkHeader chunkHeader, int fileSize) { Ensure.NotNull(chunkHeader, "chunkHeader"); Ensure.Positive(fileSize, "fileSize"); @@ -319,8 +318,8 @@ private void InitNew(ChunkHeader chunkHeader, int fileSize, ITransactionFileTrac } _readSide = chunkHeader.IsScavenged - ? (IChunkReadSide)new TFChunkReadSideScavenged(this, false, tracker) - : new TFChunkReadSideUnscavenged(this, tracker); + ? (IChunkReadSide)new TFChunkReadSideScavenged(this, false) + : new TFChunkReadSideUnscavenged(this); // Always cache the active chunk // If the chunk is scavenged we will definitely mark it readonly before we are done writing to it. 
@@ -329,7 +328,7 @@ private void InitNew(ChunkHeader chunkHeader, int fileSize, ITransactionFileTrac } } - private void InitOngoing(int writePosition, bool checkSize, ITransactionFileTracker tracker) { + private void InitOngoing(int writePosition, bool checkSize) { Ensure.Nonnegative(writePosition, "writePosition"); var fileInfo = new FileInfo(_filename); if (!fileInfo.Exists) @@ -360,7 +359,7 @@ private void InitOngoing(int writePosition, bool checkSize, ITransactionFileTrac } } - _readSide = new TFChunkReadSideUnscavenged(this, tracker); + _readSide = new TFChunkReadSideUnscavenged(this); // Always cache the active chunk CacheInMemory(); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index 24e433e29ac..f3dad94211e 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -25,7 +25,7 @@ public interface IChunkReadSide { } private class TFChunkReadSideUnscavenged : TFChunkReadSide, IChunkReadSide { - public TFChunkReadSideUnscavenged(TFChunk chunk, ITransactionFileTracker tracker) : base(chunk, tracker) { + public TFChunkReadSideUnscavenged(TFChunk chunk) : base(chunk) { if (chunk.ChunkHeader.IsScavenged) throw new ArgumentException("Scavenged TFChunk passed into unscavenged chunk read side."); } @@ -142,8 +142,8 @@ private bool CacheIsOptimized { get { return _optimizeCache && _logPositionsBloomFilter != null; } } - public TFChunkReadSideScavenged(TFChunk chunk, bool optimizeCache, ITransactionFileTracker tracker) - : base(chunk, tracker) { + public TFChunkReadSideScavenged(TFChunk chunk, bool optimizeCache) + : base(chunk) { _optimizeCache = optimizeCache; if (!chunk.ChunkHeader.IsScavenged) throw new ArgumentException(string.Format("Chunk provided is not scavenged: {0}", chunk)); @@ -562,12 +562,10 @@ private static int UpperMidpointBound(Midpoint[] 
midpoints, long pos) { private abstract class TFChunkReadSide { protected readonly TFChunk Chunk; protected readonly ILogger _log = Log.ForContext(); - protected readonly ITransactionFileTracker _tracker; - protected TFChunkReadSide(TFChunk chunk, ITransactionFileTracker tracker) { + protected TFChunkReadSide(TFChunk chunk) { Ensure.NotNull(chunk, "chunk"); Chunk = chunk; - _tracker = tracker; } private bool ValidateRecordPosition(long actualPosition) { @@ -625,7 +623,7 @@ record = null; ValidateRecordLength(length, actualPosition); record = LogRecord.ReadFrom(workItem.Reader, length); - _tracker.OnRead(record); + workItem.Tracker.OnRead(record); int suffixLength = workItem.Reader.ReadInt32(); ValidateSuffixLength(length, suffixLength, actualPosition); @@ -706,7 +704,7 @@ record = null; } record = LogRecord.ReadFrom(workItem.Reader, length); - _tracker.OnRead(record); + workItem.Tracker.OnRead(record); return true; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs index 8333c1f515f..1b6d0d1e160 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs @@ -13,15 +13,13 @@ public class TFChunkDb : IDisposable { public readonly TFChunkManager Manager; private readonly ILogger _log; - private readonly ITransactionFileTracker _tracker; private int _closed; - public TFChunkDb(TFChunkDbConfig config, ITransactionFileTracker tracker = null, ILogger log = null) { + public TFChunkDb(TFChunkDbConfig config, ILogger log = null) { Ensure.NotNull(config, "config"); Config = config; - _tracker = tracker ?? new TFChunkTracker.NoOp(); - Manager = new TFChunkManager(Config, _tracker); + Manager = new TFChunkManager(Config); _log = log ?? 
Serilog.Log.ForContext(); } @@ -76,7 +74,6 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) unbufferedRead: Config.Unbuffered, initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, - tracker: _tracker, optimizeReadSideCache: Config.OptimizeReadSideCache, reduceFileCachePressure: Config.ReduceFileCachePressure); else { @@ -85,8 +82,7 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) unbuffered: Config.Unbuffered, writethrough: Config.WriteThrough, initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, - reduceFileCachePressure: Config.ReduceFileCachePressure, - tracker: _tracker); + reduceFileCachePressure: Config.ReduceFileCachePressure); // chunk is full with data, we should complete it right here if (!readOnly) chunk.Complete(); @@ -97,8 +93,7 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, optimizeReadSideCache: Config.OptimizeReadSideCache, - reduceFileCachePressure: Config.ReduceFileCachePressure, - tracker: _tracker); + reduceFileCachePressure: Config.ReduceFileCachePressure); } // This call is theadsafe. 
@@ -126,8 +121,7 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, optimizeReadSideCache: Config.OptimizeReadSideCache, - reduceFileCachePressure: Config.ReduceFileCachePressure, - tracker: _tracker); + reduceFileCachePressure: Config.ReduceFileCachePressure); if (lastChunk.ChunkFooter.LogicalDataSize != chunkLocalPos) { lastChunk.Dispose(); throw new CorruptDatabaseException(new BadChunkInDatabaseException( @@ -152,8 +146,7 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) unbuffered: Config.Unbuffered, writethrough: Config.WriteThrough, initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, - reduceFileCachePressure: Config.ReduceFileCachePressure, - tracker: _tracker); + reduceFileCachePressure: Config.ReduceFileCachePressure); Manager.AddChunk(lastChunk); } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs index 21a1018e587..195de89a643 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs @@ -20,7 +20,6 @@ public int ChunksCount { private readonly TFChunkDbConfig _config; private readonly TFChunk.TFChunk[] _chunks = new TFChunk.TFChunk[MaxChunksCount]; - private readonly ITransactionFileTracker _tracker; private volatile int _chunksCount; private volatile bool _cachingEnabled; @@ -28,10 +27,9 @@ public int ChunksCount { private int _backgroundPassesRemaining; private int _backgroundRunning; - public TFChunkManager(TFChunkDbConfig config, ITransactionFileTracker tracker) { + public TFChunkManager(TFChunkDbConfig config) { Ensure.NotNull(config, "config"); _config = config; - _tracker = tracker; } public void EnableCaching() { @@ -103,8 +101,7 @@ public TFChunk.TFChunk CreateTempChunk(ChunkHeader chunkHeader, 
int fileSize) { _config.WriteThrough, _config.InitialReaderCount, _config.MaxReaderCount, - _config.ReduceFileCachePressure, - _tracker); + _config.ReduceFileCachePressure); } public TFChunk.TFChunk AddNewChunk() { @@ -121,8 +118,7 @@ public TFChunk.TFChunk AddNewChunk() { writethrough: _config.WriteThrough, initialReaderCount: _config.InitialReaderCount, maxReaderCount: _config.MaxReaderCount, - reduceFileCachePressure: _config.ReduceFileCachePressure, - tracker: _tracker); + reduceFileCachePressure: _config.ReduceFileCachePressure); AddChunk(chunk); return chunk; } @@ -147,8 +143,7 @@ public TFChunk.TFChunk AddNewChunk(ChunkHeader chunkHeader, int fileSize) { writethrough: _config.WriteThrough, initialReaderCount: _config.InitialReaderCount, maxReaderCount: _config.MaxReaderCount, - reduceFileCachePressure: _config.ReduceFileCachePressure, - tracker: _tracker); + reduceFileCachePressure: _config.ReduceFileCachePressure); AddChunk(chunk); return chunk; } @@ -205,7 +200,7 @@ public TFChunk.TFChunk SwitchChunk(TFChunk.TFChunk chunk, bool verifyHash, } newChunk = TFChunk.TFChunk.FromCompletedFile(newFileName, verifyHash, _config.Unbuffered, - _config.InitialReaderCount, _config.MaxReaderCount, _tracker, _config.OptimizeReadSideCache, _config.ReduceFileCachePressure ); + _config.InitialReaderCount, _config.MaxReaderCount, _config.OptimizeReadSideCache, _config.ReduceFileCachePressure ); } lock (_chunksLocker) { diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index 2430155e306..60ba8b47c68 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -195,8 +195,7 @@ private void ScavengeChunk(bool alwaysKeepScavenged, TFChunk.TFChunk oldChunk, writethrough: _db.Config.WriteThrough, initialReaderCount: _db.Config.InitialReaderCount, maxReaderCount: _db.Config.MaxReaderCount, - 
reduceFileCachePressure: _db.Config.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: _db.Config.ReduceFileCachePressure); } catch (IOException exc) { _logger.Error(exc, "IOException during creating new chunk for scavenging purposes. Stopping scavenging process..."); @@ -431,8 +430,7 @@ private static bool MergeChunks( writethrough: db.Config.WriteThrough, initialReaderCount: db.Config.InitialReaderCount, maxReaderCount: db.Config.MaxReaderCount, - reduceFileCachePressure: db.Config.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: db.Config.ReduceFileCachePressure); } catch (IOException exc) { logger.Error(exc, "IOException during creating new chunk for scavenging merge purposes. Stopping scavenging merge process..."); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 964fde3e0fa..83409efdfc7 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -23,9 +23,4 @@ public void OnRead(ILogRecord record) { _readBytes.Add(prepare.Data.Length + prepare.Metadata.Length); _readEvents.Add(1); } - - public class NoOp : ITransactionFileTracker { - public void OnRead(ILogRecord record) { - } - } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs index 861f712fb21..cc5a6defef6 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs @@ -4,4 +4,11 @@ namespace EventStore.Core.TransactionLog; public interface ITransactionFileTracker { void OnRead(ILogRecord record); + + static readonly ITransactionFileTracker NoOp = new NoOp(); +} + +file class NoOp : ITransactionFileTracker { + public void OnRead(ILogRecord record) { + } 
} diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs index 222d6279149..380a954d7de 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs @@ -44,8 +44,7 @@ public ChunkWriterForExecutor( writethrough: dbConfig.WriteThrough, initialReaderCount: dbConfig.InitialReaderCount, maxReaderCount: dbConfig.MaxReaderCount, - reduceFileCachePressure: dbConfig.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: dbConfig.ReduceFileCachePressure); } public string FileName { get; } From 9e87c73ad7a8c47c1e30ef4cd1290564b51b5a98 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Thu, 21 Nov 2024 09:58:53 +0000 Subject: [PATCH 02/38] add infra for setting/unsetting the tracker when checking out a readerworkitem --- .../TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs | 8 ++++++++ .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 10 +++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs index 06908348ad6..dbdfe194235 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs @@ -16,5 +16,13 @@ public ReaderWorkItem(Stream stream, BinaryReader reader, bool isMemory) { } public ITransactionFileTracker Tracker { get; private set; } = ITransactionFileTracker.NoOp; + + public void OnCheckedOut(ITransactionFileTracker tracker) { + Tracker = tracker; + } + + public void OnReturned() { + Tracker = ITransactionFileTracker.NoOp; + } } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs 
b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 2505aea67cc..b3795a5bfaf 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -1081,7 +1081,14 @@ public void WaitForDestroy(int timeoutMs) { throw new TimeoutException(); } - private ReaderWorkItem GetReaderWorkItem() { + //qq todo always provide a tracker + private ReaderWorkItem GetReaderWorkItem(ITransactionFileTracker tracker = null) { + var item = GetReaderWorkItemImpl(); + item.OnCheckedOut(tracker ?? ITransactionFileTracker.NoOp); + return item; + } + + private ReaderWorkItem GetReaderWorkItemImpl() { if (_selfdestructin54321) throw new FileBeingDeletedException(); @@ -1138,6 +1145,7 @@ private ReaderWorkItem GetReaderWorkItem() { } private void ReturnReaderWorkItem(ReaderWorkItem item) { + item.OnReturned(); if (item.IsMemory) { // we avoid taking the _cachedDataLock here every time because we would be // contending with other reader threads also returning readerworkitems. From 701a4cab009af63dbdcb0dff3604f4f677e01b36 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Thu, 21 Nov 2024 11:17:58 +0000 Subject: [PATCH 03/38] builds! 
plumbing towards providing the tracker on read --- .../Fakes/FakeTfReader.cs | 8 ++++ .../Index/FakeIndexReader.cs | 8 ++++ .../IndexV1/table_index_on_range_query.cs | 2 +- .../table_index_on_try_get_one_value_query.cs | 2 +- .../Index/IndexV1/table_index_should.cs | 2 +- ...index_with_corrupt_index_entries_should.cs | 4 +- ...two_ptables_and_memtable_on_range_query.cs | 2 +- ..._hash_collision_when_upgrading_to_64bit.cs | 12 +++++- ...doesnt_exist_drops_entry_and_carries_on.cs | 12 +++++- ...upgrading_index_to_64bit_stream_version.cs | 12 +++++- .../Scavenge/when_scavenging_a_table_index.cs | 4 +- ...x_and_another_table_is_completed_during.cs | 4 +- ..._index_cancelled_while_scavenging_table.cs | 4 +- ..._index_cancelled_while_waiting_for_lock.cs | 4 +- .../when_scavenging_a_table_index_fails.cs | 4 +- ...tfile_with_duplicate_events_in_a_stream.cs | 4 +- .../when_hard_deleting_stream.cs | 4 +- ...hard_deleting_stream_with_log_version_0.cs | 4 +- .../Services/Storage/FakeInMemoryTFReader.cs | 8 ++++ .../HashCollisions/with_hash_collisions.cs | 14 +++++-- .../Services/Storage/ReadIndexTestScenario.cs | 2 +- .../Storage/RepeatableDbTestScenario.cs | 2 +- ...scavenging_tfchunk_with_deleted_records.cs | 4 +- ...en_scavenging_tfchunk_with_transactions.cs | 8 ++-- ...ersion0_log_records_and_deleted_records.cs | 8 ++-- ...version0_log_records_using_transactions.cs | 8 ++-- .../Services/Storage/SimpleDbTestScenario.cs | 2 +- ...dex_for_partially_persisted_transaction.cs | 2 +- .../Helpers/ScavengeTestScenario.cs | 6 +-- .../Truncation/TruncateAndReOpenDbScenario.cs | 2 +- ...hen_appending_to_a_tfchunk_and_flushing.cs | 4 +- .../when_creating_tfchunk_from_empty_file.cs | 7 ++-- ...venged_tfchunk_with_all_records_removed.cs | 4 +- ..._reading_cached_empty_scavenged_tfchunk.cs | 5 ++- .../when_reading_from_a_cached_tfchunk.cs | 4 +- ...eading_uncached_empty_scavenged_tfchunk.cs | 5 ++- ...n_writing_multiple_records_to_a_tfchunk.cs | 7 ++-- .../LogFormatAbstractorV3Tests.cs | 
2 +- ...V2StreamExistenceFilterInitializerTests.cs | 8 ++-- .../LogV3/PartitionManagerTests.cs | 10 ++++- .../Scavenge/Infrastructure/Scenario.cs | 10 ++--- src/EventStore.Core/ClusterVNode.cs | 8 ++-- src/EventStore.Core/Index/TableIndex.cs | 10 ++--- .../LogAbstraction/LogFormatAbstractor.cs | 2 +- .../LogV2StreamExistenceFilterInitializer.cs | 6 +-- .../Services/Storage/ReaderIndex/AllReader.cs | 4 +- .../Storage/ReaderIndex/IndexBackend.cs | 6 +-- .../Storage/ReaderIndex/IndexCommitter.cs | 6 +-- .../Storage/ReaderIndex/IndexReader.cs | 26 ++++++------- .../Storage/ReaderIndex/IndexWriter.cs | 6 +-- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 16 ++++---- .../Chunks/TFChunk/TFChunkReadSide.cs | 38 +++++++++---------- .../TransactionLog/Chunks/TFChunkReader.cs | 14 ++++++- .../TransactionLog/Chunks/TFChunkScavenger.cs | 5 ++- .../TransactionLog/ITransactionFileReader.cs | 12 ++++-- .../DbAccess/ChunkReaderForExecutor.cs | 4 +- .../DbAccess/ChunkReaderForIndexExecutor.cs | 6 +-- .../DbAccess/IndexReaderForCalculator.cs | 6 +-- 58 files changed, 245 insertions(+), 158 deletions(-) diff --git a/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs b/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs index 37e0c18edaf..20e28a15f28 100644 --- a/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs +++ b/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs @@ -3,6 +3,14 @@ namespace EventStore.Core.Tests.Fakes { public class FakeTfReader : ITransactionFileReader { + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/Index/FakeIndexReader.cs b/src/EventStore.Core.Tests/Index/FakeIndexReader.cs index 35e201601bc..841d65a4d05 100644 --- a/src/EventStore.Core.Tests/Index/FakeIndexReader.cs +++ b/src/EventStore.Core.Tests/Index/FakeIndexReader.cs @@ 
-10,6 +10,14 @@ public FakeIndexReader(Func existsAt = null) { _existsAt = existsAt ?? (l => true); } + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs index 7c6012bfd7e..85cf52d90cf 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs @@ -34,7 +34,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(PathName, _lowHasher, _highHasher, "", () => new HashListMemTable(version: _ptableVersion, maxSize: 40), - () => { throw new InvalidOperationException(); }, + _ => { throw new InvalidOperationException(); }, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 5, diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs index e04e5a97e9b..62188d70438 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs @@ -38,7 +38,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(_ptableVersion, maxSize: 10), - () => fakeReader, + _ => fakeReader, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 5, diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs 
b/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs index c055116bbd5..375daa85928 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs @@ -30,7 +30,7 @@ public override async Task TestFixtureSetUp() { var highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(PathName, lowHasher, highHasher, "", () => new HashListMemTable(_ptableVersion, maxSize: 20), - () => { throw new InvalidOperationException(); }, + _ => { throw new InvalidOperationException(); }, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 10, diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs index 990f40f2a18..c3e08acfd53 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs @@ -26,7 +26,7 @@ public void ConstructTableIndexWithCorruptIndexEntries(byte version, bool skipIn _tableIndex = new TableIndex(PathName, lowHasher, highHasher, "", () => new HashListMemTable(version, maxSize: NumIndexEntries), - () => fakeReader, + _ => fakeReader, version, int.MaxValue, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: NumIndexEntries, @@ -67,7 +67,7 @@ public void ConstructTableIndexWithCorruptIndexEntries(byte version, bool skipIn //load table index again _tableIndex = new TableIndex(PathName, lowHasher, highHasher, "", () => new HashListMemTable(version, maxSize: NumIndexEntries), - () => fakeReader, + _ => fakeReader, version, int.MaxValue, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: NumIndexEntries, diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs 
b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs index 7cace1e5706..f0b49bd2904 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs @@ -39,7 +39,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new FakeIndexHasher(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(_ptableVersion, maxSize: 10), - () => fakeReader, + _ => fakeReader, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, diff --git a/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs b/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs index fb8ca60cd78..2a1b33a3c66 100644 --- a/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs +++ b/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs @@ -39,7 +39,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV1, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV1, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 5 + _extraStreamHashesAtBeginning + _extraStreamHashesAtEnd, @@ -66,7 +66,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(_ptableVersion, maxSize: 5), - () => fakeReader, + _ => fakeReader, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 5, @@ -146,6 +146,14 @@ public void should_have_entries_in_sorted_order() { } public class FakeIndexReader : ITransactionFileReader { 
+ public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs b/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs index 90693eedd6c..8240148c152 100644 --- a/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs +++ b/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs @@ -61,7 +61,7 @@ public override async Task TestFixtureSetUp() { var fakeReader = new TFReaderLease(new FakeIndexReader2()); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV1, maxSize: 3), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV1, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 3, @@ -76,7 +76,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, emptyStreamId, () => new HashListMemTable(_ptableVersion, maxSize: 3), - () => fakeReader, + _ => fakeReader, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 3, @@ -135,6 +135,14 @@ public void should_have_all_entries_except_scavenged() { } private class FakeIndexReader2 : ITransactionFileReader { + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { throw new NotImplementedException(); } diff --git 
a/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs b/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs index 330385e3214..3e0f0ced63b 100644 --- a/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs +++ b/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs @@ -30,7 +30,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV2, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 5, @@ -47,7 +47,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(_ptableVersion, maxSize: 5), - () => fakeReader, + _ => fakeReader, _ptableVersion, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 5, @@ -127,6 +127,14 @@ public void should_have_entries_in_sorted_order() { } public class FakeIndexReader : ITransactionFileReader { + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs index e84ec36383a..922dfa4b89b 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs @@ -39,7 +39,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", 
() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, @@ -63,7 +63,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs index ddb006ded42..4302ceae0cf 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs @@ -46,7 +46,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, @@ -79,7 +79,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs index 0249e6a18bc..eaf87fb2002 100644 --- 
a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs @@ -35,7 +35,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, @@ -60,7 +60,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs index 4391e2fcbd8..a7a5b97e917 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs @@ -29,7 +29,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, @@ -56,7 +56,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, 
PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs index e7fdd5bd96e..d0200209d01 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs @@ -36,7 +36,7 @@ public override async Task TestFixtureSetUp() { _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => throw new Exception("Expected exception") /* throw an exception when the first PTable scavenge starts and tries to acquire a reader */, + _ => throw new Exception("Expected exception") /* throw an exception when the first PTable scavenge starts and tries to acquire a reader */, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, @@ -60,7 +60,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5), - () => fakeReader, + _ => fakeReader, PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 2, diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs index 310650e99cd..9824856409f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs @@ -143,7 +143,7 @@ 
public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; _tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, @@ -192,7 +192,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs index e6d19ee5e8b..583ceea5d6a 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs @@ -21,10 +21,10 @@ protected override void WriteTestScenario() { public void should_change_expected_version_to_deleted_event_number_when_reading() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.That(chunkRecords.Any(x => diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs 
b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs index 1216eaa3692..6cbdeee76d9 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs @@ -41,10 +41,10 @@ private void WriteV0HardDelete(string eventStreamId) { public void should_change_expected_version_to_deleted_event_number_when_reading() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.That(chunkRecords.Any(x => diff --git a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs index 1f043bfeb2b..834982d5729 100644 --- a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs +++ b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs @@ -15,6 +15,14 @@ public FakeInMemoryTfReader(int recordOffset){ _recordOffset = recordOffset; } + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void AddRecord(ILogRecord record, long position){ _records.Add(position, record); } diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs index 582383ce553..33d578e8b32 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs @@ -52,7 +52,7 @@ public void Setup() { _highHasher = _logFormat.HighHasher; _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, _logFormat.EmptyStreamId, () => new HashListMemTable(PTableVersions.IndexV1, maxSize: _maxMemTableSize), - () => _fakeReader, + _ => _fakeReader, PTableVersions.IndexV1, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: _maxMemTableSize, @@ -311,7 +311,7 @@ protected override void when() { _tableIndex.Close(false); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV2, maxSize: _maxMemTableSize), - () => _fakeReader, + _ => _fakeReader, PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: _maxMemTableSize, @@ -419,7 +419,7 @@ public FakeIndexBackend(TFReaderLease readerLease) { _readerLease = readerLease; } - public TFReaderLease BorrowReader() { + public TFReaderLease BorrowReader(ITransactionFileTracker tracker) { return _readerLease; } @@ -462,6 +462,14 @@ public EventStore.Core.Data.SystemSettings GetSystemSettings() { } public class FakeReader : ITransactionFileReader { + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs index ca3e2a05064..a57184aecc5 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs @@ -111,7 +111,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = TransformTableIndex(new TableIndex(indexDirectory, 
LowHasher, HighHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs index a348a4089bf..e9867ac3242 100644 --- a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs @@ -64,7 +64,7 @@ public void CreateDb(params Rec[] records) { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), PTableVersions.IndexV3, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs index 0353829a9df..a8b0c1def2a 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs @@ -53,10 +53,10 @@ public void should_be_able_to_read_the_all_stream() { public void should_have_updated_deleted_stream_event_number() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = 
chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } var id = _logFormat.StreamIds.LookupValue(_deletedEventStreamId); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs index 09e0a1919c3..9a64799821c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs @@ -58,10 +58,10 @@ public void the_log_records_are_in_first_chunk() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.AreEqual(7, chunkRecords.Count); @@ -72,10 +72,10 @@ public void the_log_records_are_unchanged_in_second_chunk() { var chunk = Db.Manager.GetChunk(1); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.AreEqual(2, chunkRecords.Count); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs index 6d2428771a1..3ca64ed8b28 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs @@ -59,10 +59,10 @@ public void should_be_able_to_read_the_all_stream() { public void should_have_updated_deleted_stream_event_number() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } var deletedRecord = (PrepareLogRecord)chunkRecords.First(x => x.RecordType == LogRecordType.Prepare @@ -77,10 +77,10 @@ public void should_have_updated_deleted_stream_event_number() { public void the_log_records_are_still_version_0() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.IsTrue(chunkRecords.All(x => x.Version == LogRecordVersion.LogRecordV0)); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs index 04fde8383dd..84bf5d8664e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs @@ -105,10 +105,10 @@ public void the_log_records_are_still_version_0_in_first_chunk() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.IsTrue(chunkRecords.All(x => x.Version == LogRecordVersion.LogRecordV0)); @@ -120,10 +120,10 @@ public void the_log_records_are_unchanged_in_second_chunk() { var chunk = Db.Manager.GetChunk(1); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.IsTrue(chunkRecords.All(x => x.Version == LogRecordVersion.LogRecordV0)); diff --git a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs index 13ffa55e278..052b701b6b9 100644 --- a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs @@ -62,7 +62,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV2, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), PTableVersions.IndexV2, 
int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs index 45f9f316bfa..af4e99b44f0 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs @@ -36,7 +36,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV2, maxSize: MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable); diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs index aff0f7aadbb..7001fb1b3da 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs @@ -65,7 +65,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; var tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200), - () => new TFReaderLease(readerPool), + tracker => new TFReaderLease(readerPool, tracker), PTableVersions.IndexV3, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 100, @@ -121,10 +121,10 @@ protected void CheckRecords() { var chunk = _dbResult.Db.Manager.GetChunk(i); 
var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward((int)result.NextPosition); + result = chunk.TryReadClosestForward((int)result.NextPosition, ITransactionFileTracker.NoOp); } Assert.AreEqual(_keptRecords[i].Length, chunkRecords.Count, "Wrong number of records in chunk #{0}", i); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs index 79c761851cc..50d6d32a65d 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs @@ -43,7 +43,7 @@ private void ReOpenDb() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + tracker => new TFReaderLease(readers, tracker), PTableVersions.IndexV3, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs index 988bd7840f0..6c859a395e9 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs @@ -68,7 +68,7 @@ public void the_record_can_be_read_at_exact_position() { [Test] public void the_record_can_be_read_as_first_one() { - var res = _chunk.TryReadFirst(); + var res = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, 
res.LogRecord); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); @@ -76,7 +76,7 @@ public void the_record_can_be_read_as_first_one() { [Test] public void the_record_can_be_read_as_closest_forward_to_pos_zero() { - var res = _chunk.TryReadClosestForward(0); + var res = _chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs index 12252ee5739..4ecca8c2e97 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -49,19 +50,19 @@ public void there_is_no_record_at_pos_zero() { [Test] public void there_is_no_first_record() { - var res = _chunk.TryReadFirst(); + var res = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void there_is_no_closest_forward_record_to_pos_zero() { - var res = _chunk.TryReadClosestForward(0); + var res = _chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void there_is_no_closest_backward_record_from_end() { - var res = _chunk.TryReadClosestForward(0); + var res = _chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs 
b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs index 647a766cccb..2cc906d582b 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs @@ -153,10 +153,10 @@ public void commit3_cant_be_read_at_position() { [Test] public void sequencial_read_returns_no_records() { var records = new List(); - RecordReadResult res = _scavengedChunk.TryReadFirst(); + RecordReadResult res = _scavengedChunk.TryReadFirst(ITransactionFileTracker.NoOp); while (res.Success) { records.Add(res.LogRecord); - res = _scavengedChunk.TryReadClosestForward((int)res.NextPosition); + res = _scavengedChunk.TryReadClosestForward((int)res.NextPosition, ITransactionFileTracker.NoOp); } if (LogFormatHelper.IsV2) { diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs index 086e5cdf6ae..ac1e76a795c 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs @@ -1,3 +1,4 @@ +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -27,12 +28,12 @@ public void no_record_at_exact_position_can_be_read() { [Test] public void no_record_can_be_read_as_first_record() { - Assert.IsFalse(_chunk.TryReadFirst().Success); + Assert.IsFalse(_chunk.TryReadFirst(ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_closest_forward_record() { - Assert.IsFalse(_chunk.TryReadClosestForward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp).Success); } [Test] diff --git 
a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs index 749488a4a6c..abe91233678 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs @@ -63,7 +63,7 @@ public void the_record_can_be_read_at_exact_position() { [Test] public void the_record_can_be_read_as_first_record() { - var res = _cachedChunk.TryReadFirst(); + var res = _cachedChunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.AreEqual(_record, res.LogRecord); @@ -72,7 +72,7 @@ public void the_record_can_be_read_as_first_record() { [Test] public void the_record_can_be_read_as_closest_forward_to_zero_pos() { - var res = _cachedChunk.TryReadClosestForward(0); + var res = _cachedChunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.AreEqual(_record, res.LogRecord); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs index 19686c9d20c..e27b6f707b3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs @@ -1,3 +1,4 @@ +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -26,12 +27,12 @@ public void no_record_at_exact_position_can_be_read() { [Test] public void no_record_can_be_read_as_first_record() { - Assert.IsFalse(_chunk.TryReadFirst().Success); + 
Assert.IsFalse(_chunk.TryReadFirst(ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_closest_forward_record() { - Assert.IsFalse(_chunk.TryReadClosestForward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp).Success); } [Test] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs index ef0dbb2f01c..baafbe212c0 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs @@ -1,4 +1,5 @@ using System; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -82,7 +83,7 @@ public void the_second_record_can_be_read_at_position() { [Test] public void the_first_record_can_be_read() { - var res = _chunk.TryReadFirst(); + var res = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_prepare1.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.IsTrue(res.LogRecord is IPrepareLogRecord); @@ -91,7 +92,7 @@ public void the_first_record_can_be_read() { [Test] public void the_second_record_can_be_read_as_closest_forward_after_first() { - var res = _chunk.TryReadClosestForward(_prepare1.GetSizeWithLengthPrefixAndSuffix()); + var res = _chunk.TryReadClosestForward(_prepare1.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_prepare1.GetSizeWithLengthPrefixAndSuffix() + _prepare2.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); @@ -102,7 +103,7 @@ public void the_second_record_can_be_read_as_closest_forward_after_first() { [Test] public void cannot_read_past_second_record_with_closest_forward_method() { var res = 
_chunk.TryReadClosestForward(_prepare1.GetSizeWithLengthPrefixAndSuffix() - + _prepare2.GetSizeWithLengthPrefixAndSuffix()); + + _prepare2.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } diff --git a/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs b/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs index 3d90467e87d..2e8075b6985 100644 --- a/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs @@ -377,7 +377,7 @@ public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func : IIndexBackend { - public TFReaderLease BorrowReader() { + public TFReaderLease BorrowReader(ITransactionFileTracker tracker) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs index 13ee44540e8..220cb1aefae 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs @@ -27,14 +27,14 @@ public LogV2StreamExistenceFilterInitializerTests() { version: PTableVersions.IndexV4, maxSize: 1_000_000 * 2), maxSizeForMemory: 100_000, - tfReaderFactory: () => new TFReaderLease(_log), + tfReaderFactory: tracker => new TFReaderLease(_log), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: 5); _tableIndex.Initialize(0); _sut = new LogV2StreamExistenceFilterInitializer( - tfReaderFactory: () => new TFReaderLease(_log), + tfReaderFactory: tracker => new TFReaderLease(_log), tableIndex: _tableIndex); var hasher = new CompositeHasher(new XXHashUnsafe(), new Murmur3AUnsafe()); _filter = new MockExistenceFilter(hasher); @@ -180,14 +180,14 @@ public void 
cannot_initialize_with_v1_indexes() { memTableFactory: () => new HashListMemTable( version: PTableVersions.IndexV1, maxSize: 1_000_000 * 2), - tfReaderFactory: () => throw new Exception("index tried to read the log"), + tfReaderFactory: _ => throw new Exception("index tried to read the log"), ptableVersion: PTableVersions.IndexV1, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: 5); tableIndex.Initialize(0); var sut = new LogV2StreamExistenceFilterInitializer( - tfReaderFactory: () => throw new Exception("initializer tried to read the log"), + tfReaderFactory: _ => throw new Exception("initializer tried to read the log"), tableIndex: tableIndex); var filter = new MockExistenceFilter(hasher: null); diff --git a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs index e488be40708..542cd303db1 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs @@ -213,7 +213,15 @@ public FakeReader(Guid? rootPartitionId, Guid? 
rootPartitionTypeId, bool without _results.Add(new SeqReadResult(true, false, rootPartition, 0, 0, 0)); } } - + + public void OnCheckedOut(ITransactionFileTracker tracker) { + throw new NotImplementedException(); + } + + public void OnReturned() { + throw new NotImplementedException(); + } + public void Reposition(long position) { _resultIndex = (int) position; } diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index 59a1f09df05..f086f63c357 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -254,7 +254,7 @@ private async Task RunInternalAsync( highHasher: highHasher, emptyStreamId: logFormat.EmptyStreamId, memTableFactory: () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 200), - tfReaderFactory: () => new TFReaderLease(readerPool), + tfReaderFactory: tracker => new TFReaderLease(readerPool, tracker), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: ESConsts.PTableInitialReaderCount, @@ -331,7 +331,7 @@ private async Task RunInternalAsync( var calculatorIndexReader = new AdHocIndexReaderInterceptor( new IndexReaderForCalculator( readIndex, - () => new TFReaderLease(readerPool), + tracker => new TFReaderLease(readerPool, tracker), scavengeState.LookupUniqueHashUser), (f, handle, from, maxCount, x) => { if (_calculatingCancellationTrigger != null) @@ -419,7 +419,7 @@ private async Task RunInternalAsync( IIndexExecutor indexExecutor = new IndexExecutor( logger: logger, indexScavenger: cancellationWrappedIndexScavenger, - streamLookup: new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool)), + streamLookup: new ChunkReaderForIndexExecutor(tracker => new TFReaderLease(readerPool, tracker)), unsafeIgnoreHardDeletes: _unsafeIgnoreHardDeletes, restPeriod: restPeriod, throttle: throttle); @@ 
-574,10 +574,10 @@ protected static void CheckRecords(ILogRecord[][] expected, DbResult actual) { var chunk = actual.Db.Manager.GetChunk(i); var chunkRecords = new List(); - var result = chunk.TryReadFirst(); + var result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward((int)result.NextPosition); + result = chunk.TryReadClosestForward((int)result.NextPosition, ITransactionFileTracker.NoOp); } Assert.True( diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 49fb2e004e3..540d0c12a36 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -596,7 +596,7 @@ TFChunkDbConfig CreateDbConfig( MaxReaderCount = pTableMaxReaderCount, StreamExistenceFilterSize = options.Database.StreamExistenceFilterSize, StreamExistenceFilterCheckpoint = Db.Config.StreamExistenceFilterCheckpoint, - TFReaderLeaseFactory = () => new TFReaderLease(readerPool) + TFReaderLeaseFactory = tracker => new TFReaderLease(readerPool, tracker) }); ICacheResizer streamInfoCacheResizer; @@ -642,7 +642,7 @@ TFChunkDbConfig CreateDbConfig( logFormat.EmptyStreamId, () => new HashListMemTable(options.IndexBitnessVersion, maxSize: options.Database.MaxMemTableSize * 2), - () => new TFReaderLease(readerPool), + tracker => new TFReaderLease(readerPool, tracker), options.IndexBitnessVersion, maxSizeForMemory: options.Database.MaxMemTableSize, maxTablesPerLevel: 2, @@ -1294,7 +1294,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { logger: logger, new IndexReaderForCalculator( readIndex, - () => new TFReaderLease(readerPool), + tracker => new TFReaderLease(readerPool, tracker), state.LookupUniqueHashUser), chunkSize: TFConsts.ChunkSize, cancellationCheckPeriod: cancellationCheckPeriod, @@ -1320,7 +1320,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var indexExecutor = new IndexExecutor( logger, new IndexScavenger(tableIndex), - new 
ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool)), + new ChunkReaderForIndexExecutor(tracker => new TFReaderLease(readerPool, tracker)), unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, restPeriod: 32_768, throttle: throttle); diff --git a/src/EventStore.Core/Index/TableIndex.cs b/src/EventStore.Core/Index/TableIndex.cs index b4185264e09..f3cd614f4a4 100644 --- a/src/EventStore.Core/Index/TableIndex.cs +++ b/src/EventStore.Core/Index/TableIndex.cs @@ -49,7 +49,7 @@ public long PrepareCheckpoint { private readonly byte _ptableVersion; private readonly string _directory; private readonly Func _memTableFactory; - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly IIndexFilenameProvider _fileNameProvider; private readonly IIndexStatusTracker _statusTracker; @@ -79,7 +79,7 @@ public TableIndex(string directory, IHasher highHasher, TStreamId emptyStreamId, Func memTableFactory, - Func tfReaderFactory, + Func tfReaderFactory, byte ptableVersion, int maxAutoMergeIndexLevel, int pTableMaxReaderCount, @@ -310,7 +310,7 @@ private void ReadOffQueue() { Log.Debug("Performing manual index merge."); _isManualMergePending = false; - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq var manualMergeResult = _indexMap.TryManualMerge( (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), @@ -361,7 +361,7 @@ private void ReadOffQueue() { _indexMap.SaveToFile(indexmapFile); if (addResult.CanMergeAny) { - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq MergeResult mergeResult; do { mergeResult = _indexMap.TryMergeOneLevel( @@ -464,7 +464,7 @@ private void ScavengeInternal( try { ct.ThrowIfCancellationRequested(); - using (var reader = _tfReaderFactory()) { + using (var reader = 
_tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq var indexmapFile = Path.Combine(_directory, IndexMapFilename); Func existsAt = entry => reader.ExistsAt(entry.Position); diff --git a/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs b/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs index 2362f44506a..9bb69bd7739 100644 --- a/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs +++ b/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs @@ -21,7 +21,7 @@ public record LogFormatAbstractorOptions { public ICheckpoint StreamExistenceFilterCheckpoint { get; init; } public TimeSpan StreamExistenceFilterCheckpointInterval { get; init; } = TimeSpan.FromSeconds(30); public TimeSpan StreamExistenceFilterCheckpointDelay { get; init; } = TimeSpan.FromSeconds(5); - public Func TFReaderLeaseFactory { get; init; } + public Func TFReaderLeaseFactory { get; init; } } public interface ILogFormatAbstractorFactory { diff --git a/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs b/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs index c447889d83c..527e5ae33db 100644 --- a/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs +++ b/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs @@ -21,13 +21,13 @@ namespace EventStore.Core.LogV2 { /// of the previous record, which is fine. the net effect is an extra record is initialized /// on startup next time. 
public class LogV2StreamExistenceFilterInitializer : INameExistenceFilterInitializer { - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly ITableIndex _tableIndex; protected static readonly ILogger Log = Serilog.Log.ForContext(); public LogV2StreamExistenceFilterInitializer( - Func tfReaderFactory, + Func tfReaderFactory, ITableIndex tableIndex) { Ensure.NotNull(tableIndex, nameof(tableIndex)); @@ -134,7 +134,7 @@ private void InitializeFromLog(INameExistenceFilter filter) { // whether the checkpoint is the pre or post position of the last processed record. var startPosition = filter.CurrentCheckpoint == -1 ? 0 : filter.CurrentCheckpoint; Log.Information("Initializing from log starting at {startPosition:N0}", startPosition); - using var reader = _tfReaderFactory(); + using var reader = _tfReaderFactory(ITransactionFileTracker.NoOp); //qq reader.Reposition(startPosition); while (TryReadNextLogRecord(reader, out var result)) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs index f346950066d..2ba411415df 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs @@ -74,7 +74,7 @@ private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, var consideredEventsCount = 0L; var firstCommit = true; var reachedEndOfStream = false; - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qqqqqqqqqqqqqqqqq push here long nextCommitPos = pos.CommitPosition; while (records.Count < maxCount && consideredEventsCount < maxSearchWindow) { if (nextCommitPos > _indexCommitter.LastIndexedPosition) { @@ -201,7 +201,7 @@ private IndexReadAllResult ReadAllEventsBackwardInternal(TFPos pos, int maxCount var consideredEventsCount = 0L; bool firstCommit = true; var 
reachedEndOfStream = false; - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq long nextCommitPostPos = pos.CommitPosition; while (records.Count < maxCount && consideredEventsCount < maxSearchWindow) { reader.Reposition(nextCommitPostPos); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs index f093bfefdeb..d6357d2a70d 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs @@ -12,7 +12,7 @@ namespace EventStore.Core.Services.Storage.ReaderIndex { public interface IIndexBackend { - TFReaderLease BorrowReader(); + TFReaderLease BorrowReader(ITransactionFileTracker tracker); void SetSystemSettings(SystemSettings systemSettings); SystemSettings GetSystemSettings(); } @@ -48,8 +48,8 @@ public IndexBackend( _streamMetadataCache = streamMetadataCache; } - public TFReaderLease BorrowReader() { - return new TFReaderLease(_readers); + public TFReaderLease BorrowReader(ITransactionFileTracker tracker) { + return new TFReaderLease(_readers, tracker); } public EventNumberCached TryGetStreamLastEventNumber(TStreamId streamId) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs index 77cd4c48e8c..4ed8606351b 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs @@ -131,7 +131,7 @@ public void Init(long buildToPosition) { _indexRebuild = true; using (_statusTracker.StartRebuilding()) - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var startPosition = Math.Max(0, _persistedCommitPos); var fullRebuild = startPosition == 0; 
reader.Reposition(startPosition); @@ -450,7 +450,7 @@ public long Commit(IList> commitedPrepares, bool is } private IEnumerable> GetTransactionPrepares(long transactionPos, long commitPos) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq reader.Reposition(transactionPos); // in case all prepares were scavenged, we should not read past Commit LogPosition @@ -487,7 +487,7 @@ private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitL private void CheckDuplicateEvents(TStreamId streamId, CommitLogRecord commit, IList> indexEntries, IList> prepares) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var entries = _tableIndex.GetRange(streamId, indexEntries[0].Version, indexEntries[indexEntries.Count - 1].Version); foreach (var indexEntry in entries) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs index 271434fe48c..8b9689c7cb9 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs @@ -109,7 +109,7 @@ IndexReadEventResult IIndexReader.ReadEvent(string streamName, TStrea Ensure.Valid(streamId, _validator); if (eventNumber < -1) throw new ArgumentOutOfRangeException("eventNumber"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return ReadEventInternal(reader, streamName, streamId, eventNumber); } } @@ -155,7 +155,7 @@ private IndexReadEventResult ReadEventInternal(TFReaderLease reader, string stre } IPrepareLogRecord IIndexReader.ReadPrepare(TStreamId streamId, long eventNumber) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return 
ReadPrepareInternal(reader, streamId, eventNumber); } } @@ -226,7 +226,7 @@ private IndexReadStreamResult ReadStreamEventsForwardInternal(string streamName, Ensure.Nonnegative(fromEventNumber, "fromEventNumber"); Ensure.Positive(maxCount, "maxCount"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var lastEventNumber = GetStreamLastEventNumberCached(reader, streamId); var metadata = GetStreamMetadataCached(reader, streamId); if (lastEventNumber == EventNumber.DeletedStream) @@ -503,7 +503,7 @@ delegate IEnumerable ReadIndexEntries( indexReader._tableIndex.GetRange(streamHandle, startEventNumber, endEventNumber); public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var result = ReadEventInfoForwardInternal( streamId, reader, @@ -522,7 +522,7 @@ public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, // note for simplicity skipIndexScanOnRead is always treated as false. 
see ReadEventInfoInternal public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return ReadEventInfoForwardInternal( streamId, reader, @@ -602,7 +602,7 @@ private IndexReadStreamResult ReadStreamEventsBackwardInternal(string streamName Ensure.Valid(streamId, _validator); Ensure.Positive(maxCount, "maxCount"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var lastEventNumber = GetStreamLastEventNumberCached(reader, streamId); var metadata = GetStreamMetadataCached(reader, streamId); if (lastEventNumber == EventNumber.DeletedStream) @@ -668,7 +668,7 @@ public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId if (fromEventNumber == ExpectedVersion.NoStream) return new IndexReadEventInfoResult(new EventInfo[] { }, -1); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return ReadEventInfoBackwardInternal( streamId, reader, @@ -802,14 +802,14 @@ private EventInfo[] ReadEventInfoInternal( public TStreamId GetEventStreamIdByTransactionId(long transactionId) { Ensure.Nonnegative(transactionId, "transactionId"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var res = ReadPrepareInternal(reader, transactionId); return res == null ? default : res.EventStreamId; } } public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq var sysSettings = _backend.GetSystemSettings() ?? 
SystemSettings.Default; StreamAcl acl; StreamAcl sysAcl; @@ -830,14 +830,14 @@ public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { long IIndexReader.GetStreamLastEventNumber(TStreamId streamId) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return GetStreamLastEventNumberCached(reader, streamId); } } public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, reader); } } @@ -859,7 +859,7 @@ bool IsForThisStream(IndexEntry indexEntry) { // gets the last event number before beforePosition for the given stream hash. can assume that // the hash does not collide with anything before beforePosition. 
public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, reader); } } @@ -894,7 +894,7 @@ bool IsForThisStream(IndexEntry indexEntry) { StreamMetadata IIndexReader.GetStreamMetadata(TStreamId streamId) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq return GetStreamMetadataCached(reader, streamId); } } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs index b52ef7dd05f..8bc1833304d 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs @@ -123,7 +123,7 @@ public void Reset() { public CommitCheckResult CheckCommitStartingAt(long transactionPosition, long commitPosition) { TStreamId streamId; long expectedVersion; - using (var reader = _indexBackend.BorrowReader()) { + using (var reader = _indexBackend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq try { var prepare = GetPrepare(reader, transactionPosition); if (prepare == null) { @@ -362,7 +362,7 @@ public TransactionInfo GetTransactionInfo(long writerCheckpoint, long private bool GetTransactionInfoUncached(long writerCheckpoint, long transactionId, out TransactionInfo transactionInfo) { - using (var reader = _indexBackend.BorrowReader()) { + using (var reader = _indexBackend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq reader.Reposition(writerCheckpoint); SeqReadResult result; while ((result = reader.TryReadPrev()).Success) { @@ -424,7 +424,7 @@ public void PurgeNotProcessedTransactions(long checkpoint) { } private IEnumerable> 
GetTransactionPrepares(long transactionPos, long commitPos) { - using (var reader = _indexBackend.BorrowReader()) { + using (var reader = _indexBackend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq reader.Reposition(transactionPos); // in case all prepares were scavenged, we should not read past Commit LogPosition diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index b3795a5bfaf..68898c82d9a 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -253,7 +253,8 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) { SetAttributes(_filename, true); CreateReaderStreams(); - var reader = GetReaderWorkItem(); + //qq come back to whether we want to track this (just reading the header and footer) + var reader = GetReaderWorkItem(ITransactionFileTracker.NoOp); try { _chunkHeader = ReadHeader(reader.Stream); Log.Debug("Opened completed {chunk} as version {version}", _filename, _chunkHeader.Version); @@ -765,12 +766,12 @@ public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { return _readSide.TryReadAt(logicalPosition, couldBeScavenged); } - public RecordReadResult TryReadFirst() { - return _readSide.TryReadFirst(); + public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { + return _readSide.TryReadFirst(tracker); } - public RecordReadResult TryReadClosestForward(long logicalPosition) { - return _readSide.TryReadClosestForward(logicalPosition); + public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { + return _readSide.TryReadClosestForward(logicalPosition, tracker); } public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { @@ -1081,10 +1082,9 @@ public void WaitForDestroy(int timeoutMs) { throw new TimeoutException(); } - //qq todo always provide a tracker 
- private ReaderWorkItem GetReaderWorkItem(ITransactionFileTracker tracker = null) { + private ReaderWorkItem GetReaderWorkItem(ITransactionFileTracker tracker) { var item = GetReaderWorkItemImpl(); - item.OnCheckedOut(tracker ?? ITransactionFileTracker.NoOp); + item.OnCheckedOut(tracker); return item; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index f3dad94211e..0f617b216a0 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -17,8 +17,8 @@ public interface IChunkReadSide { bool ExistsAt(long logicalPosition); long GetActualPosition(long logicalPosition); RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged); - RecordReadResult TryReadFirst(); - RecordReadResult TryReadClosestForward(long logicalPosition); + RecordReadResult TryReadFirst(ITransactionFileTracker tracker); + RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker); RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer); RecordReadResult TryReadLast(); RecordReadResult TryReadClosestBackward(long logicalPosition); @@ -52,7 +52,7 @@ public long GetActualPosition(long logicalPosition) { } public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq try { if (logicalPosition >= Chunk.LogicalDataSize) { _log.Warning( @@ -70,12 +70,12 @@ public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { } } - public RecordReadResult TryReadFirst() { - return TryReadClosestForward(0); + public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { + return TryReadClosestForward(0, tracker); } - public RecordReadResult TryReadClosestForward(long 
logicalPosition) { - var workItem = Chunk.GetReaderWorkItem(); + public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); //qq try { if (logicalPosition >= Chunk.LogicalDataSize) return RecordReadResult.Failure; @@ -91,7 +91,7 @@ public RecordReadResult TryReadClosestForward(long logicalPosition) { } public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq try { if (logicalPosition >= Chunk.LogicalDataSize) return RawReadResult.Failure; @@ -111,7 +111,7 @@ public RecordReadResult TryReadLast() { } public RecordReadResult TryReadClosestBackward(long logicalPosition) { - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); try { // here we allow actualPosition == _logicalDataSize as we can read backward the very last record that way if (logicalPosition > Chunk.LogicalDataSize) @@ -227,7 +227,7 @@ private InMemoryBloomFilter PopulateBloomFilter() { ReaderWorkItem workItem = null; try { - workItem = Chunk.GetReaderWorkItem(); + workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq foreach (var posMap in ReadPosMap(workItem, 0, mapCount)) { bf.Add(posMap.LogPos); @@ -309,7 +309,7 @@ public bool ExistsAt(long logicalPosition) { if (CacheIsOptimized) return MayExistAt(logicalPosition); - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq try { var actualPosition = TranslateExactPosition(workItem, logicalPosition); return actualPosition >= 0 && actualPosition < Chunk.PhysicalDataSize; @@ -326,7 +326,7 @@ public bool MayExistAt(long logicalPosition) { public long GetActualPosition(long logicalPosition) { Ensure.Nonnegative(logicalPosition, nameof(logicalPosition)); - var workItem = 
Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq try { return TranslateExactPosition(workItem, logicalPosition); } finally { @@ -335,7 +335,7 @@ public long GetActualPosition(long logicalPosition) { } public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq try { var actualPosition = TranslateExactPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) { @@ -391,15 +391,15 @@ private int TranslateExactWithMidpoints(ReaderWorkItem workItem, Midpoint[] midp return TranslateExactWithoutMidpoints(workItem, pos, recordRange.Lower, recordRange.Upper); } - public RecordReadResult TryReadFirst() { - return TryReadClosestForward(0); + public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { + return TryReadClosestForward(0, tracker); } - public RecordReadResult TryReadClosestForward(long logicalPosition) { + public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { if (Chunk.ChunkFooter.MapCount == 0) return RecordReadResult.Failure; - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateClosestForwardPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) @@ -420,7 +420,7 @@ public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func= Chunk.PhysicalDataSize) @@ -455,7 +455,7 @@ public RecordReadResult TryReadClosestBackward(long logicalPosition) { if (Chunk.ChunkFooter.MapCount == 0) return RecordReadResult.Failure; - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq try { var actualPosition = TranslateClosestForwardPosition(workItem, logicalPosition); // here we 
allow actualPosition == _physicalDataSize as we can read backward the very last record that way diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs index 062bbc0c707..cfc4677cdcc 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs @@ -23,6 +23,8 @@ public long CurrentPosition { private readonly TFChunkReaderExistsAtOptimizer _existsAtOptimizer; private readonly ILogger _log = Log.ForContext(); + private ITransactionFileTracker _tracker = ITransactionFileTracker.NoOp; + public TFChunkReader(TFChunkDb db, IReadOnlyCheckpoint writerCheckpoint, long initialPosition = 0, bool optimizeReadSideCache = false) { Ensure.NotNull(db, "dbConfig"); @@ -38,6 +40,16 @@ public TFChunkReader(TFChunkDb db, IReadOnlyCheckpoint writerCheckpoint, long in _existsAtOptimizer = TFChunkReaderExistsAtOptimizer.Instance; } + //qq are these always called? + //qqqqqq we actually probably dont want to put the tracker in here + public void OnCheckedOut(ITransactionFileTracker tracker) { + _tracker = tracker; + } + + public void OnReturned() { //qq rename, this needs to be called before being returned. 
same for readerworkitem + _tracker = ITransactionFileTracker.NoOp; + } + public void Reposition(long position) { _curPos = position; } @@ -56,7 +68,7 @@ private SeqReadResult TryReadNextInternal(int retries) { var chunk = _db.Manager.GetChunkFor(pos); RecordReadResult result; try { - result = chunk.TryReadClosestForward(chunk.ChunkHeader.GetLocalLogPosition(pos)); + result = chunk.TryReadClosestForward(chunk.ChunkHeader.GetLocalLogPosition(pos), _tracker); CountRead(chunk.IsCached); } catch (FileBeingDeletedException) { if (retries > MaxRetries) diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index 60ba8b47c68..809f3390c6a 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -16,6 +16,7 @@ using EventStore.Core.TransactionLog.LogRecords; using EventStore.Core.TransactionLog.Scavenging; using EventStore.LogCommon; +using OpenTelemetry.Trace; using ILogger = Serilog.ILogger; namespace EventStore.Core.TransactionLog.Chunks { @@ -727,13 +728,13 @@ private bool IsSoftDeletedTempStreamWithinSameChunk(TStreamId eventStreamId, lon private static void TraverseChunkBasic(TFChunk.TFChunk chunk, CancellationToken ct, Action process) { - var result = chunk.TryReadFirst(); + var result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); //qq while (result.Success) { process(new CandidateRecord(result.LogRecord, result.RecordLength)); ct.ThrowIfCancellationRequested(); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); //qq } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs index f431f9c39a8..0ddfca19ef5 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs +++ 
b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs @@ -3,6 +3,8 @@ namespace EventStore.Core.TransactionLog { public interface ITransactionFileReader { + void OnCheckedOut(ITransactionFileTracker tracker); + void OnReturned(); void Reposition(long position); SeqReadResult TryReadNext(); @@ -12,23 +14,27 @@ public interface ITransactionFileReader { bool ExistsAt(long position); } - public struct TFReaderLease : IDisposable { + public readonly struct TFReaderLease : IDisposable { public readonly ITransactionFileReader Reader; private readonly ObjectPool _pool; - public TFReaderLease(ObjectPool pool) { + public TFReaderLease(ObjectPool pool, ITransactionFileTracker tracker) { _pool = pool; Reader = pool.Get(); + Reader.OnCheckedOut(tracker); } public TFReaderLease(ITransactionFileReader reader) { _pool = null; Reader = reader; + //qq what do we want to do about providing/clearing a tracker here? } void IDisposable.Dispose() { - if (_pool != null) + if (_pool != null) { + Reader.OnReturned(); _pool.Return(Reader); + } } public void Reposition(long position) { diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs index a66c382518f..28505e7ec8f 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs @@ -30,7 +30,7 @@ public IEnumerable ReadInto( RecordForExecutor.NonPrepare nonPrepare, RecordForExecutor.Prepare prepare) { - var result = _chunk.TryReadFirst(); + var result = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); //qq while (result.Success) { var record = result.LogRecord; if (record.RecordType != LogRecordType.Prepare) { @@ -51,7 +51,7 @@ public IEnumerable ReadInto( yield return true; } - result = _chunk.TryReadClosestForward(result.NextPosition); + result = 
_chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); //qq } } } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs index f35c97556b7..22dcbf81bfe 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs @@ -3,14 +3,14 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class ChunkReaderForIndexExecutor : IChunkReaderForIndexExecutor { - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; - public ChunkReaderForIndexExecutor(Func tfReaderFactory) { + public ChunkReaderForIndexExecutor(Func tfReaderFactory) { _tfReaderFactory = tfReaderFactory; } public bool TryGetStreamId(long position, out TStreamId streamId) { - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq var result = reader.TryReadAt(position, couldBeScavenged: true); if (!result.Success) { streamId = default; diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs index 592e09e1f4d..06a9a237284 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs @@ -5,12 +5,12 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class IndexReaderForCalculator : IIndexReaderForCalculator { private readonly IReadIndex _readIndex; - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly Func _lookupUniqueHashUser; public IndexReaderForCalculator( IReadIndex readIndex, - Func tfReaderFactory, + Func tfReaderFactory, 
Func lookupUniqueHashUser) { _readIndex = readIndex; @@ -66,7 +66,7 @@ public IndexReadEventInfoResult ReadEventInfoForward( } public bool IsTombstone(long logPosition) { - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq var result = reader.TryReadAt(logPosition, couldBeScavenged: true); if (!result.Success) From f2a740829e065e5947e0fc2edb2ed43be6172416 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Thu, 21 Nov 2024 11:27:16 +0000 Subject: [PATCH 04/38] more --- ...write_before_the_transaction_is_present.cs | 5 +++-- ...action_end_but_before_commit_is_present.cs | 5 +++-- ...action_end_but_before_commit_is_present.cs | 5 +++-- ...hen_reading_all_with_disallowed_streams.cs | 11 +++++----- .../when_reading_all_with_filtering.cs | 9 +++++---- ...ing_all_with_filtering_and_transactions.cs | 4 +++- .../when_reading_all_with_last_indexed_set.cs | 3 ++- ..._tfile_with_multiple_events_in_a_stream.cs | 3 ++- ...dex_off_tfile_with_prepares_and_commits.cs | 3 ++- ...rsion_numbers_greater_than_int_maxvalue.cs | 3 ++- ...mmits_for_log_records_of_mixed_versions.cs | 3 ++- ..._off_tfile_with_prepares_but_no_commits.cs | 3 ++- ...dex_off_tfile_with_two_events_in_stream.cs | 3 ++- ...prepare_but_no_commit_read_index_should.cs | 3 ++- ...elete_on_this_version_read_index_should.cs | 3 ++- ..._deleted_event_stream_read_index_should.cs | 3 ++- ...second_stream_deleted_read_index_should.cs | 3 ++- .../TransactionLog/FakeReadIndex.cs | 6 ++++-- .../Services/Storage/ReaderIndex/AllReader.cs | 20 +++++++++++-------- .../Storage/ReaderIndex/IReadIndex.cs | 6 ++++-- .../Services/Storage/ReaderIndex/ReadIndex.cs | 9 +++++---- .../Services/Storage/StorageReaderWorker.cs | 6 ++++-- .../Grpc/Enumerators.AllSubscription.cs | 3 ++- .../Enumerators.AllSubscriptionFiltered.cs | 3 ++- 24 files changed, 78 insertions(+), 47 deletions(-) diff --git 
a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs index 8e7e2a16992..c19df8e45bd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs @@ -1,4 +1,5 @@ using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -17,7 +18,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p Rec.Prepare(3, "single_write_stream_id_3", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted), Rec.Prepare(4, "single_write_stream_id_4", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted)); - var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10); + var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, firstRead.Records.Count); Assert.AreEqual("single_write_stream_id_1", firstRead.Records[0].Event.EventStreamId); @@ -35,7 +36,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p Rec.Prepare(4, "single_write_stream_id_4", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted), Rec.Commit(1, "transaction_stream_id")); - var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10); + var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, transactionRead.Records.Count); Assert.AreEqual("transaction_stream_id", transactionRead.Records[0].Event.EventStreamId); diff --git 
a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs index 0bf02d98f7c..32c672be1c2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs @@ -1,4 +1,5 @@ using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -14,7 +15,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p Rec.TransEnd(0, "transaction_stream_id"), Rec.Prepare(1, "single_write_stream_id", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted)); - var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10); + var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, firstRead.Records.Count); Assert.AreEqual("single_write_stream_id", firstRead.Records[0].Event.EventStreamId); @@ -25,7 +26,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p Rec.Prepare(1, "single_write_stream_id", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted), Rec.Commit(0, "transaction_stream_id")); - var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10); + var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, transactionRead.Records.Count); Assert.AreEqual("transaction_stream_id", transactionRead.Records[0].Event.EventStreamId); diff --git 
a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs index 921ae0429bd..78f0b53d028 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs @@ -1,4 +1,5 @@ using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -22,7 +23,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p Rec.Prepare(2, "single_write_stream_id_2", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted), Rec.Prepare(3, "single_write_stream_id_3", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted)); - var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10); + var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(3, firstRead.Records.Count); Assert.AreEqual("single_write_stream_id_1", firstRead.Records[0].Event.EventStreamId); @@ -38,7 +39,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p Rec.Prepare(3, "single_write_stream_id_3", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted), Rec.Commit(0, "transaction_stream_id")); - var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10); + var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, transactionRead.Records.Count); Assert.AreEqual("transaction_stream_id", 
transactionRead.Records[0].Event.EventStreamId); diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs index cb7725d6e85..6fabcde2a63 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs @@ -4,6 +4,7 @@ using NUnit.Framework; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage.AllReader { @@ -36,7 +37,7 @@ protected override void WriteTestScenario() { [Test] public void should_filter_out_disallowed_streams_when_reading_events_forward() { - var records = ReadIndex.ReadAllEventsForward(_forwardReadPos, 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(_forwardReadPos, 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.True(records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(records.Any(x => x.Event.EventStreamId == _allowedStream1)); @@ -50,7 +51,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit Filter.Types.FilterType.Prefix, new[] {"event-type"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1)); @@ -64,7 +65,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit 
Filter.Types.FilterType.Regex, new[] {@"^.*event-type-.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1)); @@ -78,7 +79,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit Filter.Types.FilterType.Prefix, new[] {"$persistentsubscripti"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2)); @@ -91,7 +92,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit Filter.Types.FilterType.Regex, new[] {@"^.*istentsubsc.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2)); diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs index 
e6e3eeddc0a..6649f4fff1e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage.AllReader { @@ -34,7 +35,7 @@ public void should_read_only_events_forward_with_event_type_prefix() { Filter.Types.FilterType.Prefix, new[] {"event-type"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); } @@ -45,7 +46,7 @@ public void should_read_only_events_forward_with_event_type_regex() { Filter.Types.FilterType.Regex, new[] {@"^.*other-event.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); } @@ -56,7 +57,7 @@ public void should_read_only_events_forward_with_stream_id_prefix() { Filter.Types.FilterType.Prefix, new[] {"ES2"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); } @@ -67,7 +68,7 @@ public void should_read_only_events_forward_with_stream_id_regex() { Filter.Types.FilterType.Regex, new[] {@"^.*ES2.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = 
ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); } diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs index 2e74ce4d2d9..8a89aadbefe 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.AllReader { @@ -43,7 +44,8 @@ static Rec[] ExplicitTransaction(int transaction, string stream) => new[] { pos: new Data.TFPos(0, 0), maxCount: 10, maxSearchWindow: int.MaxValue, - eventFilter: EventFilter.StreamName.Prefixes(false, "included")); + eventFilter: EventFilter.StreamName.Prefixes(false, "included"), + tracker: ITransactionFileTracker.NoOp); Assert.AreEqual(10, read.Records.Count); for (int j = 0; j < 10; j++) diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs index ed05a3a0548..9db70183174 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs @@ -2,6 +2,7 @@ using NUnit.Framework; using EventStore.Core.Data; using EventStore.Core.TransactionLog.LogRecords; +using 
EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage.AllReader { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -24,7 +25,7 @@ public void should_be_able_to_read_all_backwards() { [Test] public void should_be_able_to_read_all_forwards() { - var result = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var result = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, result.Count); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs index a2175b2592e..1a5ad026411 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -109,7 +110,7 @@ public void no_events_are_return_if_event_stream_doesnt_exist() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.AreEqual(_id1, records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs 
b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs index a892fc61ee6..1e6a2a656d6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -105,7 +106,7 @@ public void the_stream_can_be_read_for_second_stream_from_event_number() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_id1, records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs index 57c0c5a2a4a..6bff68d25d2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs @@ -1,6 +1,7 @@ using System; using 
EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -101,7 +102,7 @@ public void the_stream_can_be_read_for_second_stream_from_event_number() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_id1, records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs index cde4fa1ff00..dae191a4e25 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -98,7 +99,7 @@ public void the_stream_can_be_read_for_second_stream_from_event_number() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, 
ITransactionFileTracker.NoOp).Records; Assert.AreEqual(3, records.Count); Assert.AreEqual(_id1, records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs index d51e8fcebd3..8c1dc8f2585 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -52,7 +53,7 @@ public void the_last_event_is_not_returned_for_stream() { [Test] public void read_all_events_forward_returns_no_events() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(0, records.Count); } diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs index 3d0d0d8050d..c255867a9e2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using 
EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -100,7 +101,7 @@ public void the_stream_cant_be_read_for_second_stream() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.AreEqual(_id1, records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs index 7ab46d7121e..5b724a19256 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -71,7 +72,7 @@ public void read_stream_events_backward_should_return_commited_records() { [Test] public void read_all_forward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git 
a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs index 121768c32ec..e7943768c0d 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -112,7 +113,7 @@ public void read_stream_events_backward_should_return_stream_deleted() { [Test] public void read_all_forward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(1, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs index 97e424d7d53..093caf797d3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; 
using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -111,7 +112,7 @@ public void not_find_record_with_non_existing_version_for_event_stream_with_same [Test] public void return_all_events_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs index 6ff57263487..20f20c949fa 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -195,7 +196,7 @@ public void return_empty_range_on_from_end_range_query_for_non_existing_stream_w [Test] public void return_all_events_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); 
Assert.AreEqual(4, events.Length); diff --git a/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs b/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs index 35721d85c5b..e0bdac22bd3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs +++ b/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs @@ -6,6 +6,7 @@ using EventStore.Core.LogAbstraction; using EventStore.Core.Messages; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.Tests.TransactionLog { @@ -82,7 +83,7 @@ public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, throw new NotImplementedException(); } - public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount) { + public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -91,7 +92,8 @@ public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount) { } public IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs index 2ba411415df..e0a4620d833 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs @@ -13,14 +13,16 @@ public interface IAllReader { /// Returns event records in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). 
/// - IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, + ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// IndexReadAllResult FilteredReadAllEventsForward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker); /// /// Returns event records in the reverse sequence they were committed into TF. @@ -53,18 +55,20 @@ public AllReader(IIndexBackend backend, IIndexCommitter indexCommitter, INameLoo _eventTypes = eventTypes; } - public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount) { - return ReadAllEventsForwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter); + public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + return ReadAllEventsForwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter, tracker); } public IndexReadAllResult FilteredReadAllEventsForward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return ReadAllEventsForwardInternal(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return ReadAllEventsForwardInternal(pos, maxCount, maxSearchWindow, eventFilter, tracker); } private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { var records = new List(); var nextPos = pos; // in case we are at position after which there is no commit at all, in that case we have to force @@ -74,7 +78,7 @@ private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, var 
consideredEventsCount = 0L; var firstCommit = true; var reachedEndOfStream = false; - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qqqqqqqqqqqqqqqqq push here + using (var reader = _backend.BorrowReader(tracker)) { long nextCommitPos = pos.CommitPosition; while (records.Count < maxCount && consideredEventsCount < maxSearchWindow) { if (nextCommitPos > _indexCommitter.LastIndexedPosition) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index 3738285f3df..56a96174cc1 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -2,6 +2,7 @@ using System.Security.Claims; using EventStore.Core.Data; using EventStore.Core.Messages; +using EventStore.Core.TransactionLog; using EventStore.Core.Util; namespace EventStore.Core.Services.Storage.ReaderIndex { @@ -14,7 +15,7 @@ public interface IReadIndex { /// Returns event records in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// - IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker = null); //qqqqq make not optional /// /// Returns event records in the reverse sequence they were committed into TF. @@ -27,7 +28,8 @@ public interface IReadIndex { /// Positions is specified as pre-positions (pointer at the beginning of the record). /// IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker = null); //qqqqq make not optional /// /// Returns event records whose eventType matches the given EventFilter in the sequence they were committed into TF. 
diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index 46b1e427e03..c4574758771 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -164,13 +164,14 @@ public TStreamId GetEventStreamIdByTransactionId(long transactionId) { return _indexReader.GetEventStreamIdByTransactionId(transactionId); } - IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount) { - return _allReader.ReadAllEventsForward(pos, maxCount); + IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + return _allReader.ReadAllEventsForward(pos, maxCount, tracker); } IndexReadAllResult IReadIndex.ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return _allReader.FilteredReadAllEventsForward(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return _allReader.FilteredReadAllEventsForward(pos, maxCount, maxSearchWindow, eventFilter, tracker); } IndexReadAllResult IReadIndex.ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index db5ab5d01ea..ad46fa18fdf 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -13,6 +13,7 @@ using EventStore.Core.Services.TimerService; using EventStore.Core.Messaging; using ILogger = Serilog.ILogger; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Services.Storage { public abstract class StorageReaderWorker { @@ -435,7 +436,7 @@ private ClientMessage.ReadAllEventsForwardCompleted if (msg.ValidationTfLastCommitPosition == lastIndexedPosition) 
return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); - var res = _readIndex.ReadAllEventsForward(pos, msg.MaxCount); + var res = _readIndex.ReadAllEventsForward(pos, msg.MaxCount, ITransactionFileTracker.NoOp); //qq var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); @@ -510,7 +511,8 @@ private ClientMessage.FilteredReadAllEventsForwardCompleted FilteredReadAllEvent lastIndexedPosition); var res = _readIndex.ReadAllEventsForwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, - msg.EventFilter); + msg.EventFilter, + ITransactionFileTracker.NoOp); //qqqqqqqqqqqqqqq push here var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, diff --git a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs index 0b984ddeb42..6435f8f9b67 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs @@ -10,6 +10,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using Serilog; namespace EventStore.Core.Services.Transport.Grpc { @@ -128,7 +129,7 @@ private void Subscribe(Position? 
startPosition) { var (commitPosition, preparePosition) = startPosition.Value.ToInt64(); try { var indexResult = - _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1); + _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, ITransactionFileTracker.NoOp); CatchUp(Position.FromInt64(indexResult.NextPos.CommitPosition, indexResult.NextPos.PreparePosition)); } catch (Exception ex) { diff --git a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs index 3e3c1de3702..18b4c04f7e3 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs @@ -10,6 +10,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using Serilog; using IReadIndex = EventStore.Core.Services.Storage.ReaderIndex.IReadIndex; @@ -151,7 +152,7 @@ private void Subscribe(Position? 
startPosition) { var (commitPosition, preparePosition) = startPosition.Value.ToInt64(); try { var indexResult = - _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1); + _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, ITransactionFileTracker.NoOp); CatchUp(Position.FromInt64(indexResult.NextPos.CommitPosition, indexResult.NextPos.PreparePosition)); } catch (Exception ex) { From 242e1f66f8b238d93a9b3e9ff5a1dfdeef29d85c Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Thu, 21 Nov 2024 16:02:13 +0000 Subject: [PATCH 05/38] create transaction file trackers on demand per user --- .../Chunks/TFChunkTrackerTests.cs | 17 ++++---- src/EventStore.Core/ClusterVNode.cs | 4 +- src/EventStore.Core/MetricsBootstrapper.cs | 9 ++-- .../Services/Storage/StorageReaderService.cs | 4 +- .../Services/Storage/StorageReaderWorker.cs | 9 +++- .../Chunks/TFChunk/TFChunkReadSide.cs | 4 +- .../Chunks/TransactionFileTracker.cs | 30 ++++++++++---- .../Chunks/TransactionFileTrackerFactory.cs | 41 +++++++++++++++++++ .../TransactionLog/ITransactionFileTracker.cs | 6 ++- .../ITransactionFileTrackerFactory.cs | 17 ++++++++ 10 files changed, 112 insertions(+), 29 deletions(-) create mode 100644 src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs create mode 100644 src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs diff --git a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs index 2adef601e37..ce2b1b428d3 100644 --- a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs @@ -3,6 +3,7 @@ using System.Diagnostics.Metrics; using System.Linq; using EventStore.Core.Metrics; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using 
EventStore.Core.TransactionLog.LogRecords; using EventStore.Core.XUnit.Tests.Metrics; @@ -11,7 +12,7 @@ namespace EventStore.Core.XUnit.Tests.TransactionLog.Chunks; public class TFChunkTrackerTests : IDisposable { - private readonly TFChunkTracker _sut; + private readonly ITransactionFileTracker _sut; private readonly TestMeterListener _listener; public TFChunkTrackerTests() { @@ -20,10 +21,10 @@ public TFChunkTrackerTests() { var byteMetric = new CounterMetric(meter, "eventstore-io", unit: "bytes"); var eventMetric = new CounterMetric(meter, "eventstore-io", unit: "events"); - var readTag = new KeyValuePair("activity", "read"); - _sut = new TFChunkTracker( - readBytes: new CounterSubMetric(byteMetric, new[] {readTag}), - readEvents: new CounterSubMetric(eventMetric, new[] {readTag})); + _sut = new TransactionFileTrackerFactory( + eventMetric: eventMetric, + byteMetric: byteMetric) + .GetOrAdd("alice"); } public void Dispose() { @@ -36,7 +37,7 @@ public void can_observe_prepare_log() { data: new byte[5], meta: new byte[5]); - _sut.OnRead(prepare); + _sut.OnRead(prepare, cached: false); //qqqq update these tests _listener.Observe(); AssertEventsRead(1); @@ -46,7 +47,7 @@ public void can_observe_prepare_log() { [Fact] public void disregard_system_log() { var system = CreateSystemRecord(); - _sut.OnRead(system); + _sut.OnRead(system, cached: false); _listener.Observe(); AssertEventsRead(0); @@ -56,7 +57,7 @@ public void disregard_system_log() { [Fact] public void disregard_commit_log() { var system = CreateCommit(); - _sut.OnRead(system); + _sut.OnRead(system, cached: false); _listener.Observe(); AssertEventsRead(0); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 540d0c12a36..4620fc844d2 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -734,7 +734,9 @@ TFChunkDbConfig CreateDbConfig( var storageReader = new StorageReaderService(_mainQueue, _mainBus, readIndex, 
logFormat.SystemStreams, - readerThreadsCount, Db.Config.WriterCheckpoint.AsReadOnly(), inMemReader, _queueStatsManager, + readerThreadsCount, Db.Config.WriterCheckpoint.AsReadOnly(), inMemReader, + trackers.TransactionFileTrackers, + _queueStatsManager, trackers.QueueTrackers); _mainBus.Subscribe(storageReader); diff --git a/src/EventStore.Core/MetricsBootstrapper.cs b/src/EventStore.Core/MetricsBootstrapper.cs index 37e77f765fe..55fd8a1c956 100644 --- a/src/EventStore.Core/MetricsBootstrapper.cs +++ b/src/EventStore.Core/MetricsBootstrapper.cs @@ -25,7 +25,7 @@ public class Trackers { public GrpcTrackers GrpcTrackers { get; } = new(); public QueueTrackers QueueTrackers { get; set; } = new(); public GossipTrackers GossipTrackers { get; set; } = new (); - public ITransactionFileTracker TransactionFileTracker { get; set; } = ITransactionFileTracker.NoOp; //qq see where this is needed now + public ITransactionFileTrackerFactory TransactionFileTrackers { get; set; } = ITransactionFileTrackerFactory.NoOp; public IIndexTracker IndexTracker { get; set; } = new IndexTracker.NoOp(); public IMaxTracker WriterFlushSizeTracker { get; set; } = new MaxTracker.NoOp(); public IDurationMaxTracker WriterFlushDurationTracker { get; set; } = new DurationMaxTracker.NoOp(); @@ -113,10 +113,9 @@ public static void Bootstrap( // events if (conf.Events.TryGetValue(Conf.EventTracker.Read, out var readEnabled) && readEnabled) { - var readTag = new KeyValuePair("activity", "read"); - trackers.TransactionFileTracker = new TFChunkTracker( - readBytes: new CounterSubMetric(byteMetric, new[] {readTag}), - readEvents: new CounterSubMetric(eventMetric, new[] {readTag})); + trackers.TransactionFileTrackers = new TransactionFileTrackerFactory( + eventMetric: eventMetric, + byteMetric: byteMetric); } // from a users perspective an event is written when it is indexed: thats when it can be read. 
diff --git a/src/EventStore.Core/Services/Storage/StorageReaderService.cs b/src/EventStore.Core/Services/Storage/StorageReaderService.cs index a60957184be..36bfbdedcf1 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderService.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderService.cs @@ -7,6 +7,7 @@ using EventStore.Core.Messaging; using EventStore.Core.Metrics; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using ILogger = Serilog.ILogger; @@ -33,6 +34,7 @@ public StorageReaderService( int threadCount, IReadOnlyCheckpoint writerCheckpoint, IInMemoryStreamReader inMemReader, + ITransactionFileTrackerFactory tfTrackers, QueueStatsManager queueStatsManager, QueueTrackers trackers) { @@ -49,7 +51,7 @@ public StorageReaderService( StorageReaderWorker[] readerWorkers = new StorageReaderWorker[threadCount]; InMemoryBus[] storageReaderBuses = new InMemoryBus[threadCount]; for (var i = 0; i < threadCount; i++) { - readerWorkers[i] = new StorageReaderWorker(bus, readIndex, systemStreams, writerCheckpoint, inMemReader, i); + readerWorkers[i] = new StorageReaderWorker(bus, readIndex, systemStreams, writerCheckpoint, inMemReader, tfTrackers, i); storageReaderBuses[i] = new InMemoryBus("StorageReaderBus", watchSlowMsg: false); storageReaderBuses[i].Subscribe(readerWorkers[i]); storageReaderBuses[i].Subscribe(readerWorkers[i]); diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index ad46fa18fdf..f520e34ac3a 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -14,6 +14,7 @@ using EventStore.Core.Messaging; using ILogger = Serilog.ILogger; using EventStore.Core.TransactionLog; +using EventStore.Core.TransactionLog.Chunks; namespace EventStore.Core.Services.Storage { public abstract class 
StorageReaderWorker { @@ -39,6 +40,7 @@ public class StorageReaderWorker : private readonly ISystemStreamLookup _systemStreams; private readonly IReadOnlyCheckpoint _writerCheckpoint; private readonly IInMemoryStreamReader _inMemReader; + private readonly ITransactionFileTrackerFactory _trackers; private readonly int _queueId; private static readonly char[] LinkToSeparator = { '@' }; private const int MaxPageSize = 4096; @@ -55,6 +57,7 @@ public StorageReaderWorker( ISystemStreamLookup systemStreams, IReadOnlyCheckpoint writerCheckpoint, IInMemoryStreamReader inMemReader, + ITransactionFileTrackerFactory trackers, int queueId) { Ensure.NotNull(publisher, "publisher"); Ensure.NotNull(readIndex, "readIndex"); @@ -67,6 +70,7 @@ public StorageReaderWorker( _writerCheckpoint = writerCheckpoint; _queueId = queueId; _inMemReader = inMemReader; + _trackers = trackers; } void IHandle.Handle(ClientMessage.ReadEvent msg) { @@ -510,9 +514,12 @@ private ClientMessage.FilteredReadAllEventsForwardCompleted FilteredReadAllEvent return NoDataForFilteredCommand(msg, FilteredReadAllResult.NotModified, pos, lastIndexedPosition); + //qq is all this info necessarily here? 
do all messages have a non null user, do all users (cps) have names + var tracker = _trackers.GetOrAdd(msg.User.Identity.Name); + var res = _readIndex.ReadAllEventsForwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, msg.EventFilter, - ITransactionFileTracker.NoOp); //qqqqqqqqqqqqqqq push here + tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index 0f617b216a0..b842fcc0f75 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -623,7 +623,7 @@ record = null; ValidateRecordLength(length, actualPosition); record = LogRecord.ReadFrom(workItem.Reader, length); - workItem.Tracker.OnRead(record); + workItem.Tracker.OnRead(record, Chunk.IsCached); int suffixLength = workItem.Reader.ReadInt32(); ValidateSuffixLength(length, suffixLength, actualPosition); @@ -704,7 +704,7 @@ record = null; } record = LogRecord.ReadFrom(workItem.Reader, length); - workItem.Tracker.OnRead(record); + workItem.Tracker.OnRead(record, Chunk.IsCached); return true; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 83409efdfc7..23421cb4574 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -1,26 +1,38 @@ #nullable enable + using EventStore.Core.Metrics; using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.TransactionLog.Chunks; public class TFChunkTracker : ITransactionFileTracker { - private readonly CounterSubMetric _readBytes; - private readonly CounterSubMetric 
_readEvents; + private readonly CounterSubMetric _readCachedBytes; + private readonly CounterSubMetric _readCachedEvents; + private readonly CounterSubMetric _readUncachedBytes; + private readonly CounterSubMetric _readUncachedEvents; public TFChunkTracker( - CounterSubMetric readBytes, - CounterSubMetric readEvents) { + CounterSubMetric readCachedBytes, + CounterSubMetric readCachedEvents, + CounterSubMetric readUncachedBytes, + CounterSubMetric readUncachedEvents) { - _readBytes = readBytes; - _readEvents = readEvents; + _readCachedBytes = readCachedBytes; + _readCachedEvents = readCachedEvents; + _readUncachedBytes = readUncachedBytes; + _readUncachedEvents = readUncachedEvents; } - public void OnRead(ILogRecord record) { + public void OnRead(ILogRecord record, bool cached) { if (record is not PrepareLogRecord prepare) return; - _readBytes.Add(prepare.Data.Length + prepare.Metadata.Length); - _readEvents.Add(1); + if (cached) { + _readCachedBytes.Add(prepare.Data.Length + prepare.Metadata.Length); + _readCachedEvents.Add(1); + } else { + _readUncachedBytes.Add(prepare.Data.Length + prepare.Metadata.Length); + _readUncachedEvents.Add(1); + } } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs new file mode 100644 index 00000000000..f38b3a6d134 --- /dev/null +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs @@ -0,0 +1,41 @@ +#nullable enable + +using System.Collections.Concurrent; +using System.Collections.Generic; +using EventStore.Core.Metrics; + +namespace EventStore.Core.TransactionLog.Chunks; + +public class TransactionFileTrackerFactory : ITransactionFileTrackerFactory { + private readonly ConcurrentDictionary _trackersByUser = new(); + private readonly CounterMetric _eventMetric; + private readonly CounterMetric _byteMetric; + + public TransactionFileTrackerFactory(CounterMetric eventMetric, CounterMetric 
byteMetric) { + _eventMetric = eventMetric; + _byteMetric = byteMetric; + } + + public ITransactionFileTracker GetOrAdd(string user) { + return _trackersByUser.GetOrAdd(user, Create); + } + + private ITransactionFileTracker Create(string user) { + var readTag = new KeyValuePair("activity", "read"); + var cachedTag = new KeyValuePair("cached", "true"); + var uncachedTag = new KeyValuePair("cached", "false"); + var userTag = new KeyValuePair("user", user); + + var tracker = new TFChunkTracker( + readCachedBytes: new CounterSubMetric(_byteMetric, [readTag, cachedTag, userTag]), + readCachedEvents: new CounterSubMetric(_eventMetric, [readTag, cachedTag, userTag]), + readUncachedBytes: new CounterSubMetric(_byteMetric, [readTag, uncachedTag, userTag]), + readUncachedEvents: new CounterSubMetric(_eventMetric, [readTag, uncachedTag, userTag])); + + return tracker; + } + + public void Clear() { + _trackersByUser.Clear(); + } +} diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs index cc5a6defef6..12505106c8b 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs @@ -1,14 +1,16 @@ +#nullable enable + using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.TransactionLog; public interface ITransactionFileTracker { - void OnRead(ILogRecord record); + void OnRead(ILogRecord record, bool cached); static readonly ITransactionFileTracker NoOp = new NoOp(); } file class NoOp : ITransactionFileTracker { - public void OnRead(ILogRecord record) { + public void OnRead(ILogRecord record, bool cached) { } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs new file mode 100644 index 00000000000..589dc99da7e --- /dev/null +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs 
@@ -0,0 +1,17 @@ +#nullable enable + +using EventStore.Core.Metrics; +using EventStore.Core.TransactionLog.Chunks; +using System.Collections.Concurrent; +using System.Collections.Generic; + +namespace EventStore.Core.TransactionLog; + +public interface ITransactionFileTrackerFactory { + ITransactionFileTracker GetOrAdd(string name); + static readonly ITransactionFileTrackerFactory NoOp = new NoOp(); +} + +file class NoOp : ITransactionFileTrackerFactory { + public ITransactionFileTracker GetOrAdd(string name) => ITransactionFileTracker.NoOp; +} From a740bb302a430d76b492d31e0f52884e4dcbb445 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 07:28:53 +0000 Subject: [PATCH 06/38] plumb it to enough places to see $all reads from the webui --- .../TransactionLog/FakeReadIndex.cs | 9 ++++++--- .../Scavenging/scavenged_chunk.cs | 3 ++- ...hen_appending_to_a_tfchunk_and_flushing.cs | 4 ++-- .../when_creating_tfchunk_from_empty_file.cs | 2 +- ..._reading_cached_empty_scavenged_tfchunk.cs | 4 ++-- .../when_reading_from_a_cached_tfchunk.cs | 4 ++-- ...eading_uncached_empty_scavenged_tfchunk.cs | 4 ++-- ...n_writing_multiple_records_to_a_tfchunk.cs | 6 +++--- .../Services/Storage/ReaderIndex/AllReader.cs | 20 +++++++++++-------- .../Storage/ReaderIndex/IReadIndex.cs | 5 +++-- .../Services/Storage/ReaderIndex/ReadIndex.cs | 13 +++++++----- .../Services/Storage/StorageReaderWorker.cs | 3 ++- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 8 ++++---- .../Chunks/TFChunk/TFChunkReadSide.cs | 20 +++++++++---------- .../TransactionLog/Chunks/TFChunkReader.cs | 4 ++-- .../ITransactionFileTrackerFactory.cs | 5 ----- 16 files changed, 61 insertions(+), 53 deletions(-) diff --git a/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs b/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs index e0bdac22bd3..3f95b03c573 100644 --- a/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs +++ b/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs @@ -83,11 
+83,13 @@ public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, throw new NotImplementedException(); } - public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount) { + public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -98,7 +100,8 @@ public IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, } public IndexReadAllResult ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs index 666933c537a..4e4b57707f6 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -29,7 +30,7 @@ public void is_fully_resident_in_memory_when_cached() { Assert.IsTrue(chunk.IsCached); - var last = chunk.TryReadLast(); + var last = chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(last.Success); Assert.AreEqual(map[map.Count - 1].ActualPos, last.LogRecord.LogPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs 
b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs index 6c859a395e9..314e909107b 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs @@ -84,7 +84,7 @@ public void the_record_can_be_read_as_closest_forward_to_pos_zero() { [Test] public void the_record_can_be_read_as_closest_backward_from_end() { - var res = _chunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix()); + var res = _chunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(0, res.NextPosition); @@ -92,7 +92,7 @@ public void the_record_can_be_read_as_closest_backward_from_end() { [Test] public void the_record_can_be_read_as_last_one() { - var res = _chunk.TryReadLast(); + var res = _chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(0, res.NextPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs index 4ecca8c2e97..2729d235089 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs @@ -68,7 +68,7 @@ public void there_is_no_closest_backward_record_from_end() { [Test] public void there_is_no_last_record() { - var res = _chunk.TryReadLast(); + var res = _chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs index ac1e76a795c..12b4e1f8747 
100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs @@ -38,12 +38,12 @@ public void no_record_can_be_read_as_closest_forward_record() { [Test] public void no_record_can_be_read_as_closest_backward_record() { - Assert.IsFalse(_chunk.TryReadClosestBackward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestBackward(0, ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_last_record() { - Assert.IsFalse(_chunk.TryReadLast().Success); + Assert.IsFalse(_chunk.TryReadLast(ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs index abe91233678..e4e1dbb01fd 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs @@ -81,7 +81,7 @@ public void the_record_can_be_read_as_closest_forward_to_zero_pos() { [Test] public void the_record_can_be_read_as_closest_backward_from_end() { - var res = _cachedChunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix()); + var res = _cachedChunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(0, res.NextPosition); Assert.AreEqual(_record, res.LogRecord); @@ -89,7 +89,7 @@ public void the_record_can_be_read_as_closest_backward_from_end() { [Test] public void the_record_can_be_read_as_last() { - var res = _cachedChunk.TryReadLast(); + var res = _cachedChunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(0, res.NextPosition); Assert.AreEqual(_record, res.LogRecord); diff --git 
a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs index e27b6f707b3..14d377654f3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs @@ -37,12 +37,12 @@ public void no_record_can_be_read_as_closest_forward_record() { [Test] public void no_record_can_be_read_as_closest_backward_record() { - Assert.IsFalse(_chunk.TryReadClosestBackward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestBackward(0, ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_last_record() { - Assert.IsFalse(_chunk.TryReadLast().Success); + Assert.IsFalse(_chunk.TryReadLast(ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs index baafbe212c0..c343fe15523 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs @@ -109,7 +109,7 @@ public void cannot_read_past_second_record_with_closest_forward_method() { [Test] public void the_seconds_record_can_be_read_as_last() { - var res = _chunk.TryReadLast(); + var res = _chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_prepare1.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.AreEqual(_prepare2, res.LogRecord); @@ -117,7 +117,7 @@ public void the_seconds_record_can_be_read_as_last() { [Test] public void the_first_record_can_be_read_as_closest_backward_after_last() { - var res = _chunk.TryReadClosestBackward(_prepare1.GetSizeWithLengthPrefixAndSuffix()); + var res = 
_chunk.TryReadClosestBackward(_prepare1.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(0, res.NextPosition); Assert.AreEqual(_prepare1, res.LogRecord); @@ -125,7 +125,7 @@ public void the_first_record_can_be_read_as_closest_backward_after_last() { [Test] public void cannot_read_backward_from_zero_pos() { - var res = _chunk.TryReadClosestBackward(0); + var res = _chunk.TryReadClosestBackward(0, ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs index e0a4620d833..2ab45c4f4de 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs @@ -28,14 +28,16 @@ IndexReadAllResult FilteredReadAllEventsForward(TFPos pos, int maxCount, int max /// Returns event records in the reverse sequence they were committed into TF. /// Positions is specified as post-positions (pointer after the end of record). /// - IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, + ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). 
/// IndexReadAllResult FilteredReadAllEventsBackward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker); } public class AllReader : IAllReader { @@ -185,17 +187,19 @@ private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, } } - public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount) { - return ReadAllEventsBackwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter); + public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + return ReadAllEventsBackwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter, tracker); } public IndexReadAllResult FilteredReadAllEventsBackward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return ReadAllEventsBackwardInternal(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return ReadAllEventsBackwardInternal(pos, maxCount, maxSearchWindow, eventFilter, tracker); } private IndexReadAllResult ReadAllEventsBackwardInternal(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { var records = new List(); var nextPos = pos; // in case we are at position after which there is no commit at all, in that case we have to force @@ -205,7 +209,7 @@ private IndexReadAllResult ReadAllEventsBackwardInternal(TFPos pos, int maxCount var consideredEventsCount = 0L; bool firstCommit = true; var reachedEndOfStream = false; - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { //qq long nextCommitPostPos = pos.CommitPosition; while (records.Count < maxCount && consideredEventsCount < maxSearchWindow) { reader.Reposition(nextCommitPostPos); diff --git 
a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index 56a96174cc1..be6bcc11676 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -21,7 +21,7 @@ public interface IReadIndex { /// Returns event records in the reverse sequence they were committed into TF. /// Positions is specified as post-positions (pointer after the end of record). /// - IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, ITransactionFileTracker tracker = null); //qq make not optional /// /// Returns event records whose eventType matches the given EventFilter in the sequence they were committed into TF. @@ -36,7 +36,8 @@ IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int max /// Positions is specified as pre-positions (pointer at the beginning of the record). 
/// IndexReadAllResult ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker = null); //qqqqq make not optional void Close(); void Dispose(); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index c4574758771..22ef4361d6d 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -164,7 +164,8 @@ public TStreamId GetEventStreamIdByTransactionId(long transactionId) { return _indexReader.GetEventStreamIdByTransactionId(transactionId); } - IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { return _allReader.ReadAllEventsForward(pos, maxCount, tracker); } @@ -175,12 +176,14 @@ IndexReadAllResult IReadIndex.ReadAllEventsForwardFiltered(TFPos pos, int maxCou } IndexReadAllResult IReadIndex.ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return _allReader.FilteredReadAllEventsBackward(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return _allReader.FilteredReadAllEventsBackward(pos, maxCount, maxSearchWindow, eventFilter, tracker); } - IndexReadAllResult IReadIndex.ReadAllEventsBackward(TFPos pos, int maxCount) { - return _allReader.ReadAllEventsBackward(pos, maxCount); + IndexReadAllResult IReadIndex.ReadAllEventsBackward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { + return _allReader.ReadAllEventsBackward(pos, maxCount, tracker); } public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { diff --git 
a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index f520e34ac3a..fd293213caa 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -476,7 +476,8 @@ private ClientMessage.ReadAllEventsBackwardCompleted ReadAllEventsBackward( if (msg.ValidationTfLastCommitPosition == lastIndexedPosition) return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); - var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount); + var tracker = _trackers.GetOrAdd(msg.User.Identity.Name); + var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount, tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 68898c82d9a..49342573fec 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -778,12 +778,12 @@ public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer); - RecordReadResult TryReadLast(); - RecordReadResult TryReadClosestBackward(long logicalPosition); + RecordReadResult TryReadLast(ITransactionFileTracker tracker); + RecordReadResult TryReadClosestBackward(long logicalPosition, ITransactionFileTracker tracker); } private class TFChunkReadSideUnscavenged : TFChunkReadSide, IChunkReadSide { @@ -106,12 +106,12 @@ public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func Chunk.LogicalDataSize) @@ -447,15 +447,15 @@ public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func MaxRetries) diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs 
b/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs index 589dc99da7e..ba4e8781ee0 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs @@ -1,10 +1,5 @@ #nullable enable -using EventStore.Core.Metrics; -using EventStore.Core.TransactionLog.Chunks; -using System.Collections.Concurrent; -using System.Collections.Generic; - namespace EventStore.Core.TransactionLog; public interface ITransactionFileTrackerFactory { From 77df9d396fbfcbf5694aeee465c2ea489fd8b527 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 08:21:34 +0000 Subject: [PATCH 07/38] Enumerate source rather than just cached/uncached (allows for Archive later) --- .../Chunks/TFChunkTrackerTests.cs | 6 +-- .../Chunks/TFChunk/TFChunkReadSide.cs | 8 +++- .../Chunks/TransactionFileTracker.cs | 41 +++++++++---------- .../Chunks/TransactionFileTrackerFactory.cs | 13 +----- .../TransactionLog/ITransactionFileTracker.cs | 12 +++++- 5 files changed, 40 insertions(+), 40 deletions(-) diff --git a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs index ce2b1b428d3..abfd7cba982 100644 --- a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs @@ -37,7 +37,7 @@ public void can_observe_prepare_log() { data: new byte[5], meta: new byte[5]); - _sut.OnRead(prepare, cached: false); //qqqq update these tests + _sut.OnRead(prepare, source: ITransactionFileTracker.Source.Unknown); //qqqq update these tests _listener.Observe(); AssertEventsRead(1); @@ -47,7 +47,7 @@ public void can_observe_prepare_log() { [Fact] public void disregard_system_log() { var system = CreateSystemRecord(); - _sut.OnRead(system, cached: false); + _sut.OnRead(system, source: 
ITransactionFileTracker.Source.Unknown); _listener.Observe(); AssertEventsRead(0); @@ -57,7 +57,7 @@ public void disregard_system_log() { [Fact] public void disregard_commit_log() { var system = CreateCommit(); - _sut.OnRead(system, cached: false); + _sut.OnRead(system, source: ITransactionFileTracker.Source.Unknown); _listener.Observe(); AssertEventsRead(0); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index fbb483b6a0e..7d7ced1e661 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -623,7 +623,9 @@ record = null; ValidateRecordLength(length, actualPosition); record = LogRecord.ReadFrom(workItem.Reader, length); - workItem.Tracker.OnRead(record, Chunk.IsCached); + workItem.Tracker.OnRead(record, Chunk.IsCached ? + ITransactionFileTracker.Source.ChunkCache : + ITransactionFileTracker.Source.Disk); int suffixLength = workItem.Reader.ReadInt32(); ValidateSuffixLength(length, suffixLength, actualPosition); @@ -704,7 +706,9 @@ record = null; } record = LogRecord.ReadFrom(workItem.Reader, length); - workItem.Tracker.OnRead(record, Chunk.IsCached); + workItem.Tracker.OnRead(record, Chunk.IsCached ? 
+ ITransactionFileTracker.Source.ChunkCache : + ITransactionFileTracker.Source.Disk); return true; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 23421cb4574..01b79c19542 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -1,38 +1,37 @@ #nullable enable +using System.Collections.Generic; using EventStore.Core.Metrics; using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.TransactionLog.Chunks; public class TFChunkTracker : ITransactionFileTracker { - private readonly CounterSubMetric _readCachedBytes; - private readonly CounterSubMetric _readCachedEvents; - private readonly CounterSubMetric _readUncachedBytes; - private readonly CounterSubMetric _readUncachedEvents; + private readonly (CounterSubMetric, CounterSubMetric)[] _subMetrics; - public TFChunkTracker( - CounterSubMetric readCachedBytes, - CounterSubMetric readCachedEvents, - CounterSubMetric readUncachedBytes, - CounterSubMetric readUncachedEvents) { + public TFChunkTracker(CounterMetric eventMetric, CounterMetric byteMetric, string user) { + _subMetrics = new (CounterSubMetric, CounterSubMetric)[(int)(ITransactionFileTracker.Source.EnumLength)]; + for (var i = 0; i < _subMetrics.Length; i++) { + var source = $"{(ITransactionFileTracker.Source)i}"; + _subMetrics[i] = ( + CreateSubMetric(eventMetric, source, user), + CreateSubMetric(byteMetric, source, user)); + } + } - _readCachedBytes = readCachedBytes; - _readCachedEvents = readCachedEvents; - _readUncachedBytes = readUncachedBytes; - _readUncachedEvents = readUncachedEvents; + static CounterSubMetric CreateSubMetric(CounterMetric metric, string source, string user) { + var readTag = new KeyValuePair("activity", "read"); + var sourceTag = new KeyValuePair("source", source); + var userTag = new KeyValuePair("user", 
user); + return new CounterSubMetric(metric, [readTag, sourceTag, userTag]); } - public void OnRead(ILogRecord record, bool cached) { + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { if (record is not PrepareLogRecord prepare) return; - if (cached) { - _readCachedBytes.Add(prepare.Data.Length + prepare.Metadata.Length); - _readCachedEvents.Add(1); - } else { - _readUncachedBytes.Add(prepare.Data.Length + prepare.Metadata.Length); - _readUncachedEvents.Add(1); - } + var (bytes, events) = _subMetrics[(int)source]; + bytes.Add(prepare.Data.Length + prepare.Metadata.Length); + events.Add(1); } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs index f38b3a6d134..eb431564f40 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs @@ -1,7 +1,6 @@ #nullable enable using System.Collections.Concurrent; -using System.Collections.Generic; using EventStore.Core.Metrics; namespace EventStore.Core.TransactionLog.Chunks; @@ -21,17 +20,7 @@ public ITransactionFileTracker GetOrAdd(string user) { } private ITransactionFileTracker Create(string user) { - var readTag = new KeyValuePair("activity", "read"); - var cachedTag = new KeyValuePair("cached", "true"); - var uncachedTag = new KeyValuePair("cached", "false"); - var userTag = new KeyValuePair("user", user); - - var tracker = new TFChunkTracker( - readCachedBytes: new CounterSubMetric(_byteMetric, [readTag, cachedTag, userTag]), - readCachedEvents: new CounterSubMetric(_eventMetric, [readTag, cachedTag, userTag]), - readUncachedBytes: new CounterSubMetric(_byteMetric, [readTag, uncachedTag, userTag]), - readUncachedEvents: new CounterSubMetric(_eventMetric, [readTag, uncachedTag, userTag])); - + var tracker = new TFChunkTracker(_eventMetric, _byteMetric, user); return 
tracker; } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs index 12505106c8b..c676e9cc636 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs @@ -5,12 +5,20 @@ namespace EventStore.Core.TransactionLog; public interface ITransactionFileTracker { - void OnRead(ILogRecord record, bool cached); + void OnRead(ILogRecord record, Source source); + + enum Source { + Unknown, + Archive, + ChunkCache, + Disk, + EnumLength, + }; static readonly ITransactionFileTracker NoOp = new NoOp(); } file class NoOp : ITransactionFileTracker { - public void OnRead(ILogRecord record, bool cached) { + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { } } From 24fa536f5828d3c3c216c8a62eba6382c73f0795 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 10:49:13 +0000 Subject: [PATCH 08/38] plumb more api --- ...hird_one_deleted_each_read_index_should.cs | 3 ++- ..._first_stream_deleted_read_index_should.cs | 3 ++- ...count_specified_with_maxage_more_strict.cs | 3 ++- ...unt_specified_with_maxcount_more_strict.cs | 3 ++- ...hen_having_stream_with_maxage_specified.cs | 3 ++- ...n_having_stream_with_maxcount_specified.cs | 3 ++- ...ng_stream_with_truncatebefore_specified.cs | 3 ++- ...ith_maxcount_and_streams_have_same_hash.cs | 3 ++- ...count_specified_with_maxage_more_strict.cs | 3 ++- ...unt_specified_with_maxcount_more_strict.cs | 3 ++- ...hen_having_stream_with_maxage_specified.cs | 3 ++- ...n_having_stream_with_maxcount_specified.cs | 3 ++- ...ng_stream_with_truncatebefore_specified.cs | 3 ++- .../MaxAgeMaxCount/with_big_max_age.cs | 3 ++- .../MaxAgeMaxCount/with_big_max_count.cs | 3 ++- .../MaxAgeMaxCount/with_big_start_from.cs | 3 ++- ...th_invalid_max_age_and_normal_max_count.cs | 3 ++- ...th_invalid_max_count_and_normal_max_age.cs | 3 ++- 
.../MaxAgeMaxCount/with_invalid_metadata.cs | 3 ++- .../MaxAgeMaxCount/with_too_big_max_age.cs | 3 ++- ...th_too_big_max_age_and_normal_max_count.cs | 3 ++- .../MaxAgeMaxCount/with_too_big_max_count.cs | 3 ++- ...th_too_big_max_count_and_normal_max_age.cs | 3 ++- .../MaxAgeMaxCount/with_too_big_start_from.cs | 3 ++- ...runcatebefore_greater_than_int_maxvalue.cs | 3 ++- .../when_deleting_duplicate_events.cs | 3 ++- ...hunks_in_2nd_chunk__in_db_with_3_chunks.cs | 7 +++--- ...ng_through_2_chunks_in_db_with_2_chunks.cs | 7 +++--- ...ng_through_2_chunks_in_db_with_3_chunks.cs | 7 +++--- ...scavenging_tfchunk_with_deleted_records.cs | 2 +- ...en_scavenging_tfchunk_with_transactions.cs | 14 +++++------ ...ersion0_log_records_and_deleted_records.cs | 2 +- ...version0_log_records_using_transactions.cs | 14 +++++------ ..._events_and_metaevents_are_in_one_chunk.cs | 5 ++-- ..._but_some_events_are_in_multiple_chunks.cs | 5 ++-- ...ut_some_events_are_in_multiple_chunks_2.cs | 5 ++-- ..._some_metaevents_are_in_multiple_chunks.cs | 5 ++-- ..._but_some_events_are_in_multiple_chunks.cs | 5 ++-- ...s_softdeleted_with_log_record_version_0.cs | 5 ++-- ...ixed_log_record_version_0_and_version_1.cs | 5 ++-- ...ng_through_2_chunks_in_db_with_2_chunks.cs | 3 ++- ...uential_write_request_read_index_should.cs | 3 ++- ...d_prepares_in_the_end_read_index_should.cs | 23 ++++++++++--------- ...s_spanning_few_chunks_read_index_should.cs | 23 ++++++++++--------- ...rmingled_transactions_read_index_should.cs | 15 ++++++------ ...s_spanning_few_chunks_read_index_should.cs | 15 ++++++------ ...ith_index_on_disk_and_then_reopening_db.cs | 5 ++-- ...h_index_in_memory_and_then_reopening_db.cs | 3 ++- ...h_index_in_memory_and_then_reopening_db.cs | 5 ++-- .../Storage/ReaderIndex/IReadIndex.cs | 2 +- 50 files changed, 154 insertions(+), 109 deletions(-) diff --git 
a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs index c348fb8ace0..7bbe3324849 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -556,7 +557,7 @@ public void [Test] public void return_all_prepares_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3 + 5 + 7 + 1, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs index 034505689b4..48be8156e13 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs @@ -1,6 
+1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -108,7 +109,7 @@ public void not_return_range_for_non_existing_stream_with_same_hash() { [Test] public void return_all_events_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs index 27aa32ac76c..fd0b96ba1a1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -70,7 +71,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, 
records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs index 6eab5b6d722..884285d620f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -75,7 +76,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(4, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r4, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs index 1ebf7267679..e6adf61eb8a 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -70,7 +71,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs index b8076e4df44..d3cf754fbfc 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -73,7 +74,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); 
Assert.AreEqual(5, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r3, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs index 42d5157e1e7..2677984d474 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs @@ -1,4 +1,5 @@ using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -72,7 +73,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r3, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs index 433278ff472..d0110414240 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -148,7 +149,7 @@ public void backward_range_read_doesnt_return_expired_records_for_stream_2() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(12, records.Count); Assert.AreEqual(_r11, records[0].Event); Assert.AreEqual(_r21, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs index 35f17806393..1d36bfa537a 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -71,7 +72,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = 
ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs index 1761bf61d05..f0a08e3de3c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -73,7 +74,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs index 8de7a90b8ad..9abb210aa48 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -70,7 +71,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs index baf5a226725..269d1ca7fc0 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -73,7 +74,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new 
TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs index 488ff2e3907..d9adb82e91c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs @@ -1,4 +1,5 @@ using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -69,7 +70,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs index eb47606178f..e897c068bf5 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = 
EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -85,7 +86,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs index 9a2ddc5e54c..90338b10abe 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -85,7 +86,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs index 0c6e7394fc4..49aa30264c7 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -74,7 +75,7 @@ public void backward_range_read_returns_no_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs index 628bf1a5530..2cb0bd1d007 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs @@ -1,5 +1,6 @@ using System; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void on_backward_range_read_metadata_is_ignored() { [Test] public void on_read_all_forward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs index c0958dbe544..e12c752fbfd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs @@ -1,5 +1,6 @@ using System; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void on_backward_range_read_metadata_is_ignored() { [Test] public void on_read_all_forward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs index 421e66f6257..7725a89da35 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void on_backward_range_read_all_metadata_is_ignored() { [Test] public void on_read_all_forward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); 
Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs index 2d31673f5b0..cb3e80cdacb 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs index 70285f32166..a15b725ae84 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -78,7 +79,7 @@ public void on_backward_range_read_all_metadata_is_ignored() { 
[Test] public void on_read_all_forward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs index 047d5b1cec1..8fcf85f0ab2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs index edda0143129..4adbb42e8ac 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs @@ -1,6 +1,7 @@ using System; 
using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -80,7 +81,7 @@ public void on_backward_range_read_all_metadata_is_ignored() { [Test] public void on_read_all_forward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs index a7932690e74..7241edeaddf 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs index 7c154d78365..1df19f9606a 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -86,7 +87,7 @@ public void backward_range_read_returns_records_after_truncate_before() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs index 80d6aa37ad4..8e52afcc4a3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs @@ -1,5 +1,6 @@ using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.Scavenge { @@ -52,7 +53,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_events_forward_does_not_return_duplicate() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, 
ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(11, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs index 8e2e68ed4aa..cde8dc72134 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs @@ -1,6 +1,7 @@ using System; using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -28,7 +29,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_forward_does_not_return_scavenged_deleted_stream_events_and_return_remaining() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -58,7 +59,7 @@ public void read_all_backward_from_beginning_of_second_chunk_returns_no_records( [Test] public void read_all_forward_from_beginning_of_2nd_chunk_with_max_2_record_returns_delete_record_and_record_from_3rd_chunk() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100, ITransactionFileTracker.NoOp).EventRecords() .Take(2) .Select(r => r.Event) .ToArray(); @@ -69,7 +70,7 @@ public void [Test] public void read_all_forward_with_max_5_records_returns_2_records_from_2nd_chunk() { - var 
events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs index 703d784f407..cbbd16a7e4d 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs @@ -1,5 +1,6 @@ using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.Scavenge { @@ -24,7 +25,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_forward_returns_events_only_from_uncompleted_chunk_and_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); @@ -55,7 +56,7 @@ public void read_all_backward_from_beginning_of_second_chunk_returns_no_records( [Test] public void read_all_forward_from_beginning_of_second_chunk_with_max_1_record_returns_5th_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 1).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 1, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(1, events.Length); @@ -64,7 +65,7 @@ public void 
read_all_forward_from_beginning_of_second_chunk_with_max_1_record_re [Test] public void read_all_forward_with_max_5_records_returns_3_records_from_second_chunk_and_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs index 45a0481513b..6a65f5d88c3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs @@ -1,5 +1,6 @@ using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.Scavenge { @@ -26,7 +27,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_forward_does_not_return_scavenged_deleted_stream_events_and_return_remaining_plus_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -56,7 +57,7 @@ public void read_all_backward_from_beginning_of_second_chunk_returns_no_records( [Test] public void read_all_forward_from_beginning_of_2nd_chunk_with_max_1_record_returns_1st_record_from_3rd_chunk() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100).EventRecords() + var events = 
ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100, ITransactionFileTracker.NoOp).EventRecords() .Take(1) .Select(r => r.Event) .ToArray(); @@ -66,7 +67,7 @@ public void read_all_forward_from_beginning_of_2nd_chunk_with_max_1_record_retur [Test] public void read_all_forward_with_max_5_records_returns_2_records_from_2nd_chunk_plus_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs index a8b0c1def2a..5f1adc0e9ef 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs @@ -38,7 +38,7 @@ protected override void WriteTestScenario() { [Test] public void should_be_able_to_read_the_all_stream() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(5, events.Count()); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs index 9a64799821c..fdba042fbf7 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs @@ -198,7 +198,7 @@ public void 
return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_p2.EventId, records[0].Event.EventId); Assert.AreEqual(_p4.EventId, records[1].Event.EventId); @@ -225,7 +225,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_no_transaction_records_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(1, records.Count); Assert.AreEqual(_random1.EventId, records[0].Event.EventId); } @@ -240,7 +240,7 @@ public void [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), - 10); // end of first commit + 10, ITransactionFileTracker.NoOp); // end of first commit Assert.AreEqual(5, res1.Records.Count); Assert.AreEqual(_p4.EventId, res1.Records[0].Event.EventId); Assert.AreEqual(_p1.EventId, res1.Records[1].Event.EventId); @@ -264,7 +264,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4.EventId, res1.Records[2].Event.EventId); Assert.AreEqual(_p2.EventId, res1.Records[3].Event.EventId); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(2, res2.Records.Count); Assert.AreEqual(_p5.EventId, res2.Records[0].Event.EventId); } @@ 
-276,7 +276,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -310,7 +310,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); @@ -345,7 +345,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs index 3ca64ed8b28..4a98492f652 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs @@ -46,7 +46,7 @@ protected override void WriteTestScenario() { [Test] public void should_be_able_to_read_the_all_stream() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).Records.Select(r => r.Event).ToArray(); + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).Records.Select(r => r.Event).ToArray(); Assert.AreEqual(5, events.Count()); Assert.AreEqual(_event1.EventId, events[0].EventId); Assert.AreEqual(_event2.EventId, events[1].EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs index 84bf5d8664e..2ffa8eaac53 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs @@ -247,7 +247,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_p2.EventId, records[0].Event.EventId); Assert.AreEqual(_p4.EventId, records[1].Event.EventId); @@ -274,7 +274,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_no_transaction_records_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = 
ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(1, records.Count); Assert.AreEqual(_random1.EventId, records[0].Event.EventId); } @@ -289,7 +289,7 @@ public void [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), - 10); // end of first commit + 10, ITransactionFileTracker.NoOp); // end of first commit Assert.AreEqual(5, res1.Records.Count); Assert.AreEqual(_p4.EventId, res1.Records[0].Event.EventId); Assert.AreEqual(_p1.EventId, res1.Records[1].Event.EventId); @@ -313,7 +313,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4.EventId, res1.Records[2].Event.EventId); Assert.AreEqual(_p2.EventId, res1.Records[3].Event.EventId); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(2, res2.Records.Count); Assert.AreEqual(_p5.EventId, res2.Records[0].Event.EventId); } @@ -325,7 +325,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -359,7 +359,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count 
!= 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); @@ -394,7 +394,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs index 2e0fa356520..f1fbe6d219b 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -59,7 +60,7 @@ public void the_metastream_is_absent_logically() { [Test] public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - 
Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records .Where(x => x.Event.EventStreamId == "test")); @@ -68,7 +69,7 @@ public void the_stream_is_absent_physically() { [Test] public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records .Where(x => x.Event.EventStreamId == "$$test")); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs index 4fdaa0dc6a0..525fac9e935 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -74,7 +75,7 @@ public void the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new 
TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); @@ -84,7 +85,7 @@ public void the_stream_is_present_physically() { public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs index da046b7aca6..9c81eb7f204 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -78,7 +79,7 @@ public void the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new 
TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); @@ -88,7 +89,7 @@ public void the_stream_is_present_physically() { public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs index b1e34057cc2..5741334b0e2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -76,7 +77,7 @@ public void the_metastream_is_present_logically() { public void 
the_stream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); @@ -86,7 +87,7 @@ public void the_stream_is_present_physically() { public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs index 8f73ac2bf3b..e12d81abe87 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -73,7 +74,7 @@ public void 
the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); @@ -83,7 +84,7 @@ public void the_stream_is_present_physically() { public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test"), "Read $$test stream forward"); Assert.AreEqual(1, ReadIndex.ReadAllEventsBackward(headOfTf, 10).Records.Count(x => x.Event.EventStreamId == "$$test"), diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs index 7739f681281..8c9a5bdb0ee 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -53,7 +54,7 @@ public void the_metastream_is_absent_logically() { [Test] public void 
the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records .Where(x => x.Event.EventStreamId == "test")); @@ -62,7 +63,7 @@ public void the_stream_is_absent_physically() { [Test] public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records .Where(x => x.Event.EventStreamId == "$$test")); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs index 96d84c9eaa8..b60d651650b 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -76,7 +77,7 @@ public void the_metastream_is_absent_logically() { [Test] public void 
the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedStream)); Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records .Where(x => x.Event.EventStreamId == _deletedStream)); @@ -85,7 +86,7 @@ public void the_stream_is_absent_physically() { [Test] public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedMetaStream)); Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records .Where(x => x.Event.EventStreamId == _deletedMetaStream)); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs index 311b52e7d9b..4f86324fee2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using 
ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -56,7 +57,7 @@ public void read_stream_events_backward_should_return_stream_deleted() { [Test] public void read_all_forward_returns_all_events() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs index 6242b58eda6..978901fa8da 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -87,7 +88,7 @@ public void return_correct_range_on_from_end_range_query_for_stream_with_from_en [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(3, records.Count); Assert.AreEqual(_p1, records[0].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs 
b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs index 225f96da755..90ba6d45e5c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -60,7 +61,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -85,7 +86,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -98,7 +99,7 @@ public void [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, 
res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -122,7 +123,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -134,7 +135,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -168,7 +169,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); @@ -203,7 +204,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ 
-220,7 +221,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr [Test] public void reading_all_forward_at_position_with_no_commits_after_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order @@ -239,7 +240,7 @@ public void [Test] public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order @@ -266,7 +267,7 @@ public void int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -285,7 +286,7 @@ public void reading_all_backward_at_the_very_beginning_returns_prev_pos_that_all int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; diff --git 
a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs index d2bd47b826c..3d4d1ca4424 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -61,7 +62,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -87,7 +88,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -100,7 +101,7 @@ public void [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = 
ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -124,7 +125,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -136,7 +137,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -170,7 +171,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); @@ -205,7 +206,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, 
ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -222,7 +223,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr [Test] public void reading_all_forward_at_position_with_no_commits_after_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order @@ -241,7 +242,7 @@ public void [Test] public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order @@ -268,7 +269,7 @@ public void int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -287,7 +288,7 @@ public void reading_all_backward_at_the_very_beginning_returns_prev_pos_that_all int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, 
result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs index ebe1b5a0ebe..09dd7e873a1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -160,7 +161,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -186,7 +187,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -199,7 +200,7 @@ public void [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - 
var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -223,7 +224,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -235,7 +236,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -269,7 +270,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); @@ -304,7 +305,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = 
ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs index 97ca1841cc8..ce5edc1a5d6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -162,7 +163,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -188,7 +189,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new 
TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -201,7 +202,7 @@ public void [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -225,7 +226,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -237,7 +238,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -271,7 +272,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); @@ -306,7 +307,7 @@ public void 
position_returned_for_prev_page_when_traversing_backward_allow_to_tr var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs index 16bb0978803..d40d1fdef4b 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs @@ -1,6 +1,7 @@ using System.IO; using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.TransactionLog.Truncation { @@ -121,7 +122,7 @@ public void read_stream_backward_doesnt_return_truncated_records() { [Test] public void read_all_returns_only_survived_events() { - var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100); + var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); @@ -147,7 +148,7 @@ public void read_all_backward_doesnt_return_truncated_records() { [Test] public void read_all_backward_from_last_truncated_record_returns_no_records() { var pos = new TFPos(_event7.LogPosition, _event3.LogPosition); - var res = ReadIndex.ReadAllEventsForward(pos, 100); + var res = ReadIndex.ReadAllEventsForward(pos, 
100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs index 6debdfd2544..d441ea5ba5e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs @@ -1,6 +1,7 @@ using System.IO; using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.TransactionLog.Truncation { @@ -126,7 +127,7 @@ public void read_stream_backward_doesnt_return_truncated_records() { [Test] public void read_all_forward_returns_only_survived_events() { - var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100); + var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs index 8993c0615f6..7eed7fbd227 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs @@ -1,5 +1,6 @@ using 
System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.TransactionLog.Truncation { @@ -67,7 +68,7 @@ public void read_stream_backward_doesnt_return_truncated_records() { [Test] public void read_all_forward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100); + var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); @@ -88,7 +89,7 @@ public void read_all_backward_doesnt_return_truncated_records() { [Test] public void read_all_backward_from_last_truncated_record_returns_no_records() { var pos = new TFPos(_event3.LogPosition, _event3.LogPosition); - var res = ReadIndex.ReadAllEventsForward(pos, 100); + var res = ReadIndex.ReadAllEventsForward(pos, 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index be6bcc11676..27ed65a04fc 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -15,7 +15,7 @@ public interface IReadIndex { /// Returns event records in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// - IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker = null); //qqqqq make not optional + IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker); /// /// Returns event records in the reverse sequence they were committed into TF. 
From c0e3368e27be9e14d68c1b41790da23198ae1bfa Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 10:51:56 +0000 Subject: [PATCH 09/38] more plumbing --- .../AllReader/when_reading_all_with_disallowed_streams.cs | 8 ++++---- .../Storage/AllReader/when_reading_all_with_filtering.cs | 8 ++++---- .../when_reading_all_with_filtering_and_transactions.cs | 3 ++- .../Services/Storage/ReaderIndex/IReadIndex.cs | 4 ++-- .../Services/Storage/StorageReaderWorker.cs | 2 +- 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs index 6fabcde2a63..f1ec4900121 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs @@ -114,7 +114,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi Filter.Types.FilterType.Prefix, new[] {"event-type"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1)); @@ -128,7 +128,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi Filter.Types.FilterType.Regex, new[] {@"^.*event-type-.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 
10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1)); @@ -142,7 +142,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi Filter.Types.FilterType.Prefix, new[] {"$persistentsubscripti"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2)); @@ -155,7 +155,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi Filter.Types.FilterType.Regex, new[] {@"^.*istentsubsc.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2)); diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs index 6649f4fff1e..80d747784ff 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs @@ -79,7 +79,7 @@ public void 
should_read_only_events_backward_with_event_type_prefix() { Filter.Types.FilterType.Prefix, new[] {"event-type"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); } @@ -90,7 +90,7 @@ public void should_read_only_events_backward_with_event_type_regex() { Filter.Types.FilterType.Regex, new[] {@"^.*other-event.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result.Records.Count); } @@ -101,7 +101,7 @@ public void should_read_only_events_backward_with_stream_id_prefix() { Filter.Types.FilterType.Prefix, new[] {"ES2"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); } @@ -112,7 +112,7 @@ public void should_read_only_events_backward_with_stream_id_regex() { Filter.Types.FilterType.Regex, new[] {@"^.*ES2.*$"}); var eventFilter = EventFilter.Get(true, filter); - var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter); + var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.Records.Count); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs 
b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs index 8a89aadbefe..6e95ed6abfc 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs @@ -85,7 +85,8 @@ static Rec[] ExplicitTransaction(int transaction, string stream) => new[] { pos: new TFPos(writerCp, writerCp), maxCount: 10, maxSearchWindow: int.MaxValue, - eventFilter: EventFilter.StreamName.Prefixes(false, "included")); + eventFilter: EventFilter.StreamName.Prefixes(false, "included"), + tracker: ITransactionFileTracker.NoOp); Assert.AreEqual(10, read.Records.Count); for (int j = 9; j <= 0; j--) diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index 27ed65a04fc..822b32824a4 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -29,7 +29,7 @@ public interface IReadIndex { /// IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, IEventFilter eventFilter, - ITransactionFileTracker tracker = null); //qqqqq make not optional + ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given EventFilter in the sequence they were committed into TF. 
@@ -37,7 +37,7 @@ IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int max /// IndexReadAllResult ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, IEventFilter eventFilter, - ITransactionFileTracker tracker = null); //qqqqq make not optional + ITransactionFileTracker tracker); void Close(); void Dispose(); diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index fd293213caa..f384455acd3 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -563,7 +563,7 @@ private ClientMessage.FilteredReadAllEventsBackwardCompleted FilteredReadAllEven lastIndexedPosition); var res = _readIndex.ReadAllEventsBackwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, - msg.EventFilter); + msg.EventFilter, ITransactionFileTracker.NoOp); //qq var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, From 17c34133f49f93e30f075ca97c4256a917c56a88 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 10:57:54 +0000 Subject: [PATCH 10/38] more plumbing --- ...hen_reading_all_with_disallowed_streams.cs | 2 +- .../when_reading_all_with_last_indexed_set.cs | 2 +- ..._tfile_with_multiple_events_in_a_stream.cs | 2 +- ...dex_off_tfile_with_prepares_and_commits.cs | 2 +- ...rsion_numbers_greater_than_int_maxvalue.cs | 2 +- ...mmits_for_log_records_of_mixed_versions.cs | 2 +- ..._off_tfile_with_prepares_but_no_commits.cs | 2 +- ...dex_off_tfile_with_two_events_in_stream.cs | 2 +- ...prepare_but_no_commit_read_index_should.cs | 2 +- ...elete_on_this_version_read_index_should.cs | 2 +- ..._deleted_event_stream_read_index_should.cs | 2 +- ...second_stream_deleted_read_index_should.cs | 2 +- ...hird_one_deleted_each_read_index_should.cs | 2 +- 
..._first_stream_deleted_read_index_should.cs | 2 +- ...count_specified_with_maxage_more_strict.cs | 2 +- ...unt_specified_with_maxcount_more_strict.cs | 2 +- ...hen_having_stream_with_maxage_specified.cs | 2 +- ...n_having_stream_with_maxcount_specified.cs | 2 +- ...ng_stream_with_truncatebefore_specified.cs | 2 +- ...ith_maxcount_and_streams_have_same_hash.cs | 2 +- ...count_specified_with_maxage_more_strict.cs | 2 +- ...unt_specified_with_maxcount_more_strict.cs | 2 +- ...hen_having_stream_with_maxage_specified.cs | 2 +- ...n_having_stream_with_maxcount_specified.cs | 2 +- ...ng_stream_with_truncatebefore_specified.cs | 2 +- .../MaxAgeMaxCount/with_big_max_age.cs | 2 +- .../MaxAgeMaxCount/with_big_max_count.cs | 2 +- .../MaxAgeMaxCount/with_big_start_from.cs | 2 +- ...th_invalid_max_age_and_normal_max_count.cs | 2 +- ...th_invalid_max_count_and_normal_max_age.cs | 2 +- .../MaxAgeMaxCount/with_invalid_metadata.cs | 2 +- .../MaxAgeMaxCount/with_too_big_max_age.cs | 2 +- ...th_too_big_max_age_and_normal_max_count.cs | 2 +- .../MaxAgeMaxCount/with_too_big_max_count.cs | 2 +- ...th_too_big_max_count_and_normal_max_age.cs | 2 +- .../MaxAgeMaxCount/with_too_big_start_from.cs | 2 +- ...runcatebefore_greater_than_int_maxvalue.cs | 2 +- ...hunks_in_2nd_chunk__in_db_with_3_chunks.cs | 4 ++-- ...ng_through_2_chunks_in_db_with_2_chunks.cs | 4 ++-- ...ng_through_2_chunks_in_db_with_3_chunks.cs | 4 ++-- ...en_scavenging_tfchunk_with_transactions.cs | 14 ++++++------ ...version0_log_records_using_transactions.cs | 14 ++++++------ ..._events_and_metaevents_are_in_one_chunk.cs | 4 ++-- ..._but_some_events_are_in_multiple_chunks.cs | 4 ++-- ...ut_some_events_are_in_multiple_chunks_2.cs | 4 ++-- ..._some_metaevents_are_in_multiple_chunks.cs | 4 ++-- ..._but_some_events_are_in_multiple_chunks.cs | 4 ++-- ...s_softdeleted_with_log_record_version_0.cs | 4 ++-- ...ixed_log_record_version_0_and_version_1.cs | 4 ++-- ...ng_through_2_chunks_in_db_with_2_chunks.cs | 2 +- 
...uential_write_request_read_index_should.cs | 2 +- ...d_prepares_in_the_end_read_index_should.cs | 22 +++++++++---------- ...s_spanning_few_chunks_read_index_should.cs | 22 +++++++++---------- ...rmingled_transactions_read_index_should.cs | 14 ++++++------ ...s_spanning_few_chunks_read_index_should.cs | 14 ++++++------ ...ith_index_on_disk_and_then_reopening_db.cs | 2 +- ...h_index_in_memory_and_then_reopening_db.cs | 2 +- ...h_index_in_memory_and_then_reopening_db.cs | 2 +- .../Storage/ReaderIndex/IReadIndex.cs | 2 +- .../Services/Storage/StorageReaderWorker.cs | 2 +- 60 files changed, 114 insertions(+), 114 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs index f1ec4900121..2dc9e9e2470 100644 --- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs @@ -100,7 +100,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit [Test] public void should_filter_out_disallowed_streams_when_reading_events_backward() { - var records = ReadIndex.ReadAllEventsBackward(_backwardReadPos, 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(_backwardReadPos, 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.True(records.All(x => x.Event.EventStreamId != _disallowedStream)); Assert.True(records.Any(x => x.Event.EventStreamId == _allowedStream1)); diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs index 9db70183174..dea2172c56a 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs +++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs @@ -19,7 +19,7 @@ protected override void WriteTestScenario() { public void should_be_able_to_read_all_backwards() { var checkpoint = WriterCheckpoint.Read(); var pos = new TFPos(checkpoint, checkpoint); - var result = ReadIndex.ReadAllEventsBackward(pos, 10).EventRecords(); + var result = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, result.Count); } diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs index 1a5ad026411..40b443f8995 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs @@ -119,7 +119,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.AreEqual(_id1, records[1].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs index 1e6a2a656d6..1c6d54ab6ec 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs @@ -116,7 +116,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_id1, records[2].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs index 6bff68d25d2..de564315b07 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs @@ -112,7 +112,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_id1, records[2].Event.EventId); diff --git 
a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs index dae191a4e25..83c823a79ad 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs @@ -109,7 +109,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(3, records.Count); Assert.AreEqual(_id1, records[2].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs index 8c1dc8f2585..8b16380b0ed 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs @@ -59,7 +59,7 @@ public void read_all_events_forward_returns_no_events() { [Test] public void read_all_events_backward_returns_no_events() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, 
ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(0, records.Count); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs index c255867a9e2..2184a90bbfa 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs @@ -110,7 +110,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.AreEqual(_id1, records[1].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs index 5b724a19256..219550f8328 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs @@ -82,7 +82,7 @@ public void read_all_forward_should_return_all_stream_records_except_uncommited( [Test] public void read_all_backward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 
100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs index e7943768c0d..87a3088413c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs @@ -122,7 +122,7 @@ public void read_all_forward_should_return_all_stream_records_except_uncommited( [Test] public void read_all_backward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(1, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs index 093caf797d3..f974cc555af 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs @@ -122,7 +122,7 @@ public void return_all_events_on_read_all_forward() { [Test] public void return_all_events_on_read_all_backward() { - var events = 
ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs index 20f20c949fa..de85cfde5ae 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs @@ -208,7 +208,7 @@ public void return_all_events_on_read_all_forward() { [Test] public void return_all_events_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(4, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs index 7bbe3324849..a3b7ed4f712 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs @@ -585,7 +585,7 @@ public void return_all_prepares_on_read_all_forward() { [Test] public void return_all_prepares_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3 + 5 + 7 + 1, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs index 48be8156e13..62eb3331dc3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs @@ -120,7 +120,7 @@ public void return_all_events_on_read_all_forward() { [Test] public void return_all_events_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs index 
fd0b96ba1a1..998a506240d 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs @@ -80,7 +80,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs index 884285d620f..688522572d4 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs @@ -86,7 +86,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(4, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff 
--git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs index e6adf61eb8a..7dc6f389ee8 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs @@ -80,7 +80,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs index d3cf754fbfc..4e889dcfbdb 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs @@ -85,7 +85,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, 
records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs index 2677984d474..df0582d9207 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs @@ -84,7 +84,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs index d0110414240..ee0f8afac86 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs @@ -172,7 +172,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 
100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(12, records.Count); Assert.AreEqual(_r11, records[11].Event); Assert.AreEqual(_r21, records[10].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs index 1d36bfa537a..7961c9d6446 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs @@ -84,7 +84,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs index f0a08e3de3c..7f644a8f4ec 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs @@ -86,7 +86,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs index 9abb210aa48..276a6dc09fb 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs @@ -83,7 +83,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs index 269d1ca7fc0..5f570979256 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs 
+++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs @@ -86,7 +86,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs index d9adb82e91c..566e1f20f07 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs @@ -82,7 +82,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs index e897c068bf5..9d4feac9615 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs @@ -98,7 +98,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs index 90338b10abe..a95864ffca8 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs @@ -98,7 +98,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs index 49aa30264c7..dc411a09c05 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs @@ -87,7 +87,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + 
var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs index 2cb0bd1d007..dfe5dfd1184 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs @@ -103,7 +103,7 @@ public void on_read_all_forward_metadata_is_ignored() { [Test] public void on_read_all_backward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs index e12c752fbfd..3c68b1f9a7e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs @@ -103,7 +103,7 @@ public void on_read_all_forward_metadata_is_ignored() { [Test] public void on_read_all_backward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); diff 
--git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs index 7725a89da35..bf64c55957f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs @@ -90,7 +90,7 @@ public void on_read_all_forward_all_metadata_is_ignored() { [Test] public void on_read_all_backward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs index cb3e80cdacb..2e866266fab 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs @@ -90,7 +90,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs index a15b725ae84..f5788604149 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs @@ -91,7 +91,7 @@ public void on_read_all_forward_all_metadata_is_ignored() { [Test] public void on_read_all_backward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs index 8fcf85f0ab2..f0b257cb160 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs @@ -90,7 +90,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs index 4adbb42e8ac..e4326d9e8a1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs @@ -93,7 +93,7 @@ public void on_read_all_forward_all_metadata_is_ignored() { [Test] public void on_read_all_backward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs index 7241edeaddf..fc22106d9f4 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs @@ -90,7 +90,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs index 1df19f9606a..eb71ad17d47 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs @@ -99,7 +99,7 @@ public void 
read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs index cde8dc72134..2aeb249e12f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs @@ -39,7 +39,7 @@ public void read_all_forward_does_not_return_scavenged_deleted_stream_events_and [Test] public void read_all_backward_does_not_return_scavenged_deleted_stream_events_and_return_remaining() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -50,7 +50,7 @@ public void read_all_backward_does_not_return_scavenged_deleted_stream_events_an [Test] public void read_all_backward_from_beginning_of_second_chunk_returns_no_records() { var pos = new TFPos(10000, 10000); - var events = ReadIndex.ReadAllEventsBackward(pos, 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(pos, 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); 
Assert.AreEqual(0, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs index cbbd16a7e4d..4682018b660 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs @@ -36,7 +36,7 @@ public void read_all_forward_returns_events_only_from_uncompleted_chunk_and_dele [Test] public void read_all_backward_returns_events_only_from_uncompleted_chunk_and_delete_record() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); @@ -48,7 +48,7 @@ public void read_all_backward_returns_events_only_from_uncompleted_chunk_and_del [Test] public void read_all_backward_from_beginning_of_second_chunk_returns_no_records() { var pos = new TFPos(10000, 10000); - var events = ReadIndex.ReadAllEventsBackward(pos, 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(pos, 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(0, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs index 6a65f5d88c3..2706cc23728 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs @@ -38,7 +38,7 @@ public void [Test] public void read_all_backward_does_not_return_scavenged_deleted_stream_events_and_return_remaining_plus_delete_record() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -49,7 +49,7 @@ public void [Test] public void read_all_backward_from_beginning_of_second_chunk_returns_no_records() { var pos = new TFPos(10000, 10000); - var events = ReadIndex.ReadAllEventsBackward(pos, 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(pos, 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(0, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs index fdba042fbf7..2b99561d423 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs @@ -211,7 +211,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_random1.EventId, 
records[0].Event.EventId); @@ -233,7 +233,7 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -248,7 +248,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p5.EventId, res1.Records[3].Event.EventId); Assert.AreEqual(_random1.EventId, res1.Records[4].Event.EventId); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2.EventId, res2.Records[0].Event.EventId); } @@ -256,7 +256,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_postCommitPos, _p4.LogPosition); // p3 post position - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3.EventId, res1.Records[0].Event.EventId); @@ -293,7 +293,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -317,7 +317,7 @@ public void 
position_returned_for_prev_page_when_traversing_forward_allow_to_tra var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; @@ -338,7 +338,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs index 2ffa8eaac53..df98d75de92 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs @@ -260,7 +260,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); 
Assert.AreEqual(_random1.EventId, records[0].Event.EventId); @@ -282,7 +282,7 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -297,7 +297,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p5.EventId, res1.Records[3].Event.EventId); Assert.AreEqual(_random1.EventId, res1.Records[4].Event.EventId); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2.EventId, res2.Records[0].Event.EventId); } @@ -305,7 +305,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_postCommitPos, _p4.LogPosition); // p3 post position - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3.EventId, res1.Records[0].Event.EventId); @@ -342,7 +342,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -366,7 +366,7 @@ 
public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; @@ -387,7 +387,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs index f1fbe6d219b..7172ef47909 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs @@ -62,7 +62,7 @@ public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); - 
Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); } @@ -71,7 +71,7 @@ public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs index 525fac9e935..906c7be68ee 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs @@ -78,7 +78,7 @@ public void the_stream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] @@ -88,7 +88,7 @@ public void the_metastream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records 
.Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test")); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs index 9c81eb7f204..c3733fbadbc 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs @@ -82,7 +82,7 @@ public void the_stream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] @@ -92,7 +92,7 @@ public void the_metastream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test")); } } } diff --git 
a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs index 5741334b0e2..3cf92a2a83e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs @@ -80,7 +80,7 @@ public void the_stream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] @@ -90,7 +90,7 @@ public void the_metastream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test")); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs index e12d81abe87..768f7afd711 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs @@ -77,7 +77,7 @@ public void the_stream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] @@ -87,7 +87,7 @@ public void the_metastream_is_present_physically() { ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test"), "Read $$test stream forward"); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 10).Records.Count(x => x.Event.EventStreamId == "$$test"), + ReadIndex.ReadAllEventsBackward(headOfTf, 10, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test"), "Read $$test stream backward"); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs index 8c9a5bdb0ee..ea78d3b6c24 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs @@ -56,7 +56,7 @@ public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); 
Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); } @@ -65,7 +65,7 @@ public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs index b60d651650b..68a36e254c0 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs @@ -79,7 +79,7 @@ public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedStream)); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, 
ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedStream)); } @@ -88,7 +88,7 @@ public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedMetaStream)); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedMetaStream)); } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs index 4f86324fee2..c720dcee3cf 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs @@ -67,7 +67,7 @@ public void read_all_forward_returns_all_events() { [Test] public void read_all_backward_returns_all_events() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs 
b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs index 978901fa8da..6d5b81b02f6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs @@ -98,7 +98,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(3, records.Count); Assert.AreEqual(_p1, records[2].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs index 90ba6d45e5c..20a284d72fd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs @@ -73,7 +73,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); 
Assert.AreEqual(_p5, records[0].Event); @@ -93,7 +93,7 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -107,7 +107,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -115,7 +115,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_pos6, _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -152,7 +152,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -176,7 +176,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra var localPos = result.PrevPos; int localCount 
= 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -197,7 +197,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); @@ -228,7 +228,7 @@ public void int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -247,7 +247,7 @@ public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_ int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -260,7 +260,7 @@ public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_ [Test] public void reading_all_backward_at_position_with_no_commits_before_returns_prev_pos_that_allows_to_traverse_forward() { - 
var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; @@ -279,7 +279,7 @@ public void [Test] public void reading_all_backward_at_the_very_beginning_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs index 3d4d1ca4424..3b9ab810377 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs @@ -75,7 +75,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -95,7 +95,7 @@ public void [Test] public void 
read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -109,7 +109,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -117,7 +117,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_pos6, _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -154,7 +154,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -178,7 +178,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = 
ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -199,7 +199,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); @@ -230,7 +230,7 @@ public void int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -249,7 +249,7 @@ public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_ int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -262,7 +262,7 @@ public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_ [Test] public void reading_all_backward_at_position_with_no_commits_before_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new 
TFPos(_t2CommitPos, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; @@ -281,7 +281,7 @@ public void [Test] public void reading_all_backward_at_the_very_beginning_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs index 09dd7e873a1..0b6d62c2ddd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs @@ -174,7 +174,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -194,7 +194,7 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, 
ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } @@ -208,7 +208,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -216,7 +216,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(Db.Config.WriterCheckpoint.Read(), _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -253,7 +253,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -277,7 +277,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - 
localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -298,7 +298,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs index ce5edc1a5d6..c4d326fc93e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs @@ -176,7 +176,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -196,7 +196,7 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, 
records.Count); } @@ -210,7 +210,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -218,7 +218,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(Db.Config.WriterCheckpoint.Read(), _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -255,7 +255,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -279,7 +279,7 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = 
localResult.NextPos; @@ -300,7 +300,7 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs index d40d1fdef4b..a551fc0c8d6 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs @@ -135,7 +135,7 @@ public void read_all_returns_only_survived_events() { [Test] public void read_all_backward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100); + var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs index d441ea5ba5e..e49f0b33264 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs +++ 
b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs @@ -138,7 +138,7 @@ public void read_all_forward_returns_only_survived_events() { [Test] public void read_all_backward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100); + var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs index 7eed7fbd227..5a813096add 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs @@ -78,7 +78,7 @@ public void read_all_forward_doesnt_return_truncated_records() { [Test] public void read_all_backward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100); + var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index 822b32824a4..851d86192b5 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -21,7 +21,7 @@ public interface IReadIndex { /// Returns event records in the reverse sequence they were committed 
into TF. /// Positions is specified as post-positions (pointer after the end of record). /// - IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, ITransactionFileTracker tracker = null); //qq make not optional + IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given EventFilter in the sequence they were committed into TF. diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index f384455acd3..202c6b681c0 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -477,7 +477,7 @@ private ClientMessage.ReadAllEventsBackwardCompleted ReadAllEventsBackward( return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); var tracker = _trackers.GetOrAdd(msg.User.Identity.Name); - var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount, tracker); + var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount, ITransactionFileTracker.NoOp); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); From 0bc391943e5e369a0f37a2fb96d8d439d3579c9d Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 11:14:52 +0000 Subject: [PATCH 11/38] more plumbing (tfchunkreadside complete) --- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 12 ++--- .../Chunks/TFChunk/TFChunkReadSide.cs | 46 +++++++++---------- .../DbAccess/ChunkReaderForAccumulator.cs | 2 +- 3 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 49342573fec..6ef0ed40bb3 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ 
b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -619,7 +619,7 @@ public long GetActualRawPosition(long logicalPosition) { if (logicalPosition < 0) throw new ArgumentOutOfRangeException(nameof(logicalPosition)); - var actualPosition = _readSide.GetActualPosition(logicalPosition); + var actualPosition = _readSide.GetActualPosition(logicalPosition, ITransactionFileTracker.NoOp); if (actualPosition < 0) return -1; @@ -749,12 +749,12 @@ public void UnCacheFromMemory() { } public bool ExistsAt(long logicalPosition) { - return _readSide.ExistsAt(logicalPosition); + return _readSide.ExistsAt(logicalPosition, ITransactionFileTracker.NoOp); } public void OptimizeExistsAt() { if (!ChunkHeader.IsScavenged) return; - ((TFChunkReadSideScavenged)_readSide).OptimizeExistsAt(); + ((TFChunkReadSideScavenged)_readSide).OptimizeExistsAt(ITransactionFileTracker.NoOp); } public void DeOptimizeExistsAt() { @@ -763,7 +763,7 @@ public void DeOptimizeExistsAt() { } public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - return _readSide.TryReadAt(logicalPosition, couldBeScavenged); + return _readSide.TryReadAt(logicalPosition, couldBeScavenged, ITransactionFileTracker.NoOp); } public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { @@ -774,8 +774,8 @@ public RecordReadResult TryReadClosestForward(long logicalPosition, ITransaction return _readSide.TryReadClosestForward(logicalPosition, tracker); } - public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { - return _readSide.TryReadClosestForwardRaw(logicalPosition, getBuffer); + public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker) { + return _readSide.TryReadClosestForwardRaw(logicalPosition, getBuffer, tracker); } public RecordReadResult TryReadLast(ITransactionFileTracker tracker) { diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs 
b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index 7d7ced1e661..6bbebf13d12 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -14,12 +14,12 @@ public interface IChunkReadSide { void RequestCaching(); void Uncache(); - bool ExistsAt(long logicalPosition); - long GetActualPosition(long logicalPosition); - RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged); + bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker); + long GetActualPosition(long logicalPosition, ITransactionFileTracker tracker); + RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, ITransactionFileTracker tracker); RecordReadResult TryReadFirst(ITransactionFileTracker tracker); RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker); - RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer); + RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker); RecordReadResult TryReadLast(ITransactionFileTracker tracker); RecordReadResult TryReadClosestBackward(long logicalPosition, ITransactionFileTracker tracker); } @@ -38,11 +38,11 @@ public void Uncache() { // do nothing } - public bool ExistsAt(long logicalPosition) { + public bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker) { return logicalPosition >= 0 && logicalPosition < Chunk.LogicalDataSize; } - public long GetActualPosition(long logicalPosition) { + public long GetActualPosition(long logicalPosition, ITransactionFileTracker tracker) { Ensure.Nonnegative(logicalPosition, nameof(logicalPosition)); if (logicalPosition >= Chunk.LogicalDataSize) @@ -51,8 +51,8 @@ public long GetActualPosition(long logicalPosition) { return logicalPosition; } - public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - var workItem = 
Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { if (logicalPosition >= Chunk.LogicalDataSize) { _log.Warning( @@ -75,7 +75,7 @@ public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { } public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { - var workItem = Chunk.GetReaderWorkItem(tracker); //qq + var workItem = Chunk.GetReaderWorkItem(tracker); try { if (logicalPosition >= Chunk.LogicalDataSize) return RecordReadResult.Failure; @@ -90,8 +90,8 @@ public RecordReadResult TryReadClosestForward(long logicalPosition, ITransaction } } - public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { - var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { if (logicalPosition >= Chunk.LogicalDataSize) return RawReadResult.Failure; @@ -191,9 +191,9 @@ private Midpoint[] GetOrCreateMidPoints(ReaderWorkItem workItem) { } } - public void OptimizeExistsAt() { + public void OptimizeExistsAt(ITransactionFileTracker tracker) { if (_optimizeCache && _logPositionsBloomFilter == null) - _logPositionsBloomFilter = PopulateBloomFilter(); + _logPositionsBloomFilter = PopulateBloomFilter(tracker); } public void DeOptimizeExistsAt() { @@ -201,7 +201,7 @@ public void DeOptimizeExistsAt() { _logPositionsBloomFilter = null; } - private InMemoryBloomFilter PopulateBloomFilter() { + private InMemoryBloomFilter PopulateBloomFilter(ITransactionFileTracker tracker) { var mapCount = Chunk.ChunkFooter.MapCount; if (mapCount <= 0) return null; @@ -227,7 +227,7 @@ private InMemoryBloomFilter PopulateBloomFilter() { ReaderWorkItem workItem 
= null; try { - workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + workItem = Chunk.GetReaderWorkItem(tracker); foreach (var posMap in ReadPosMap(workItem, 0, mapCount)) { bf.Add(posMap.LogPos); @@ -305,11 +305,11 @@ private IEnumerable ReadPosMap(ReaderWorkItem workItem, long index, int } } - public bool ExistsAt(long logicalPosition) { + public bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker) { if (CacheIsOptimized) return MayExistAt(logicalPosition); - var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateExactPosition(workItem, logicalPosition); return actualPosition >= 0 && actualPosition < Chunk.PhysicalDataSize; @@ -323,10 +323,10 @@ public bool MayExistAt(long logicalPosition) { return _logPositionsBloomFilter.MightContain(logicalPosition); } - public long GetActualPosition(long logicalPosition) { + public long GetActualPosition(long logicalPosition, ITransactionFileTracker tracker) { Ensure.Nonnegative(logicalPosition, nameof(logicalPosition)); - var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + var workItem = Chunk.GetReaderWorkItem(tracker); try { return TranslateExactPosition(workItem, logicalPosition); } finally { @@ -334,8 +334,8 @@ public long GetActualPosition(long logicalPosition) { } } - public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateExactPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) { @@ -416,11 +416,11 @@ public RecordReadResult TryReadClosestForward(long logicalPosition, ITransaction } } - public 
RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { + public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker) { if (Chunk.ChunkFooter.MapCount == 0) return RawReadResult.Failure; - var workItem = Chunk.GetReaderWorkItem(ITransactionFileTracker.NoOp); //qq + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateClosestForwardPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs index 7bb463040e7..84f64eaca5a 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs @@ -62,7 +62,7 @@ public IEnumerable ReadChunkInto( var localPos = chunk.ChunkHeader.GetLocalLogPosition(nextPos); - var result = chunk.TryReadClosestForwardRaw(localPos, _getBuffer); + var result = chunk.TryReadClosestForwardRaw(localPos, _getBuffer, ITransactionFileTracker.NoOp); //qq plumb through all occurrences of noop if (!result.Success) { // there is no need to release the reusable buffer here since result.Success is false From fb1a59cfd05729cee03bf05d63e32f6f8c7286de Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 11:19:39 +0000 Subject: [PATCH 12/38] more plumbing (TFChunk complete) --- .../RedactionService/EventPositionTests.cs | 3 ++- ...hunks_in_2nd_chunk__in_db_with_3_chunks.cs | 2 +- ..._having_commit_spanning_multiple_chunks.cs | 5 +++-- ...tfchunkreader_existsat_optimizer_should.cs | 5 +++-- .../tfchunk_get_actual_raw_position_should.cs | 19 ++++++++++--------- ...hen_appending_to_a_tfchunk_and_flushing.cs | 2 +- .../when_creating_tfchunk_from_empty_file.cs | 2 +- 
...venged_tfchunk_with_all_records_removed.cs | 12 ++++++------ ..._reading_cached_empty_scavenged_tfchunk.cs | 2 +- .../when_reading_from_a_cached_tfchunk.cs | 2 +- ...eading_uncached_empty_scavenged_tfchunk.cs | 2 +- .../when_uncaching_a_tfchunk.cs | 2 +- ...n_writing_multiple_records_to_a_tfchunk.cs | 4 ++-- .../Services/RedactionService.cs | 3 ++- .../Telemetry/TelemetryService.cs | 3 ++- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 18 +++++++++--------- .../TransactionLog/Chunks/TFChunkReader.cs | 4 ++-- .../Chunks/TFChunkReaderExistsAtOptimizer.cs | 2 +- 18 files changed, 49 insertions(+), 43 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs b/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs index 2a8375b1256..0ba536fb3e7 100644 --- a/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs +++ b/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data.Redaction; using EventStore.Core.Messages; using EventStore.Core.Messaging; +using EventStore.Core.TransactionLog; using FluentAssertions; using NUnit.Framework; @@ -21,7 +22,7 @@ private void WriteEvent(string streamId, long eventNumber, string data) { _positions[eventNumber] = new(); var chunk = Db.Manager.GetChunkFor(eventRecord.LogPosition); - var eventOffset = chunk.GetActualRawPosition(eventRecord.LogPosition); + var eventOffset = chunk.GetActualRawPosition(eventRecord.LogPosition, ITransactionFileTracker.NoOp); var eventPosition = new EventPosition( eventRecord.LogPosition, Path.GetFileName(chunk.FileName), chunk.ChunkHeader.Version, chunk.IsReadOnly, (uint)eventOffset); _positions[eventNumber].Add(eventPosition); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs 
b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs index 2aeb249e12f..1b89c0ad9e7 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs @@ -94,7 +94,7 @@ public void last_physical_record_from_scavenged_stream_should_remain() { var chunk = Db.Manager.GetChunk(1); var chunkPos = (int)(_event7.LogPosition % Db.Config.ChunkSize); - var res = chunk.TryReadAt(chunkPos, couldBeScavenged: false); + var res = chunk.TryReadAt(chunkPos, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs index cda7dce1e1c..c5f115725c1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -60,12 +61,12 @@ protected override void WriteTestScenario() { public void all_chunks_are_merged_and_scavenged() { foreach (var rec in _scavenged) { var chunk = Db.Manager.GetChunkFor(rec.LogPosition); - Assert.IsFalse(chunk.TryReadAt(rec.LogPosition, couldBeScavenged: true).Success); + Assert.IsFalse(chunk.TryReadAt(rec.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } foreach (var rec in _survivors) { var chunk = 
Db.Manager.GetChunkFor(rec.LogPosition); - var res = chunk.TryReadAt(rec.LogPosition, couldBeScavenged: false); + var res = chunk.TryReadAt(rec.LogPosition, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(rec, res.LogRecord); } diff --git a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs index 38b79fef7bf..f7cf85cead9 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -66,14 +67,14 @@ public void posmap_items_should_exist_in_chunk() { //before optimization Assert.AreEqual(false, _existsAtOptimizer.IsOptimized(chunk)); foreach (var p in posmap) { - Assert.AreEqual(true, chunk.ExistsAt(p.LogPos)); + Assert.AreEqual(true, chunk.ExistsAt(p.LogPos, ITransactionFileTracker.NoOp)); } //after optimization _existsAtOptimizer.Optimize(chunk); Assert.AreEqual(true, _existsAtOptimizer.IsOptimized(chunk)); foreach (var p in posmap) { - Assert.AreEqual(true, chunk.ExistsAt(p.LogPos)); + Assert.AreEqual(true, chunk.ExistsAt(p.LogPos, ITransactionFileTracker.NoOp)); } chunk.MarkForDeletion(); diff --git a/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs b/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs index 804532598c7..1c386d91075 100644 --- a/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs +++ 
b/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.IO; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -69,7 +70,7 @@ public void return_correct_positions_for_an_incomplete_unscavenged_chunk() { Assert.AreEqual(numEvents, logPositions.Count); foreach(var logPos in logPositions) - Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos)); + Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos, ITransactionFileTracker.NoOp)); Assert.IsNull(posMap); } @@ -87,7 +88,7 @@ public void return_correct_positions_for_a_complete_unscavenged_chunk() { Assert.AreEqual(numEvents, logPositions.Count); foreach(var logPos in logPositions) - Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos)); + Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos, ITransactionFileTracker.NoOp)); Assert.IsNull(posMap); } @@ -107,7 +108,7 @@ public void return_correct_positions_for_a_scavenged_chunk() { Assert.AreEqual(numEvents, posMap.Count); for (int i = 0; i < numEvents; i++) { Assert.AreEqual(posMap[i].LogPos, logPositions[i]); - Assert.AreEqual(ChunkHeader.Size + posMap[i].ActualPos, chunk.GetActualRawPosition(logPositions[i])); + Assert.AreEqual(ChunkHeader.Size + posMap[i].ActualPos, chunk.GetActualRawPosition(logPositions[i], ITransactionFileTracker.NoOp)); } } @@ -125,9 +126,9 @@ public void return_minus_one_for_positions_that_are_outside_the_range_of_an_unsc Assert.IsNull(posMap); Assert.AreEqual(chunk.LogicalDataSize, chunk.PhysicalDataSize); - Assert.AreEqual(ChunkHeader.Size + chunk.LogicalDataSize - 1, chunk.GetActualRawPosition(chunk.LogicalDataSize - 1)); - Assert.AreEqual(-1, 
chunk.GetActualRawPosition(chunk.LogicalDataSize)); - Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize + 1)); + Assert.AreEqual(ChunkHeader.Size + chunk.LogicalDataSize - 1, chunk.GetActualRawPosition(chunk.LogicalDataSize - 1, ITransactionFileTracker.NoOp)); + Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize, ITransactionFileTracker.NoOp)); + Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize + 1, ITransactionFileTracker.NoOp)); } [Test] @@ -143,8 +144,8 @@ public void return_minus_one_for_positions_that_do_not_exist_in_a_scavenged_chun Assert.AreEqual(1, logPositions.Count); Assert.AreEqual(1, posMap.Count); - Assert.AreEqual(ChunkHeader.Size + posMap[0].ActualPos, chunk.GetActualRawPosition(logPositions[0])); - Assert.AreEqual(-1, chunk.GetActualRawPosition(logPositions[0] + 1)); + Assert.AreEqual(ChunkHeader.Size + posMap[0].ActualPos, chunk.GetActualRawPosition(logPositions[0], ITransactionFileTracker.NoOp)); + Assert.AreEqual(-1, chunk.GetActualRawPosition(logPositions[0] + 1, ITransactionFileTracker.NoOp)); } [Test] @@ -157,7 +158,7 @@ public void throw_argument_out_of_range_exception_for_negative_positions() { out _, out _); - Assert.Throws(() => chunk.GetActualRawPosition(-1)); + Assert.Throws(() => chunk.GetActualRawPosition(-1, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs index 314e909107b..e638b6abebb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs @@ -60,7 +60,7 @@ public void the_updated_position_is_returned() { [Test] public void the_record_can_be_read_at_exact_position() { - var res = _chunk.TryReadAt(0, couldBeScavenged: false); + var res = _chunk.TryReadAt(0, couldBeScavenged: 
false, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_result.OldPosition, res.LogRecord.LogPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs index 2729d235089..332de588b54 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs @@ -44,7 +44,7 @@ public void append_does_not_throw_exception() { [Test] public void there_is_no_record_at_pos_zero() { - var res = _chunk.TryReadAt(0, couldBeScavenged: true); + var res = _chunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs index 2cc906d582b..8ec706516fa 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs @@ -116,37 +116,37 @@ public void third_record_was_written() { [Test] public void prepare1_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_p1.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_p1.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void commit1_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_c1.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_c1.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } 
[Test] public void prepare2_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_p2.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_p2.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void commit2_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_c2.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_c2.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void prepare3_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_p3.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_p3.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void commit3_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_c3.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_c3.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs index 12b4e1f8747..7f4bff51e8d 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs @@ -23,7 +23,7 @@ public override void TestFixtureTearDown() { [Test] public void no_record_at_exact_position_can_be_read() { - Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true).Success); + Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } [Test] diff --git 
a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs index e4e1dbb01fd..659611c21e4 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs @@ -55,7 +55,7 @@ public void the_chunk_is_cached() { [Test] public void the_record_can_be_read_at_exact_position() { - var res = _cachedChunk.TryReadAt(0, couldBeScavenged: true); + var res = _cachedChunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_result.OldPosition, res.LogRecord.LogPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs index 14d377654f3..ebacd6d6092 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs @@ -22,7 +22,7 @@ public override void TestFixtureTearDown() { [Test] public void no_record_at_exact_position_can_be_read() { - Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true).Success); + Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } [Test] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs index 0595ac9d8df..5519ea79186 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs @@ -66,7 +66,7 @@ public void the_correct_position_is_returned() { [Test] public void the_record_can_be_read() { - var res = 
_uncachedChunk.TryReadAt(0, couldBeScavenged: true); + var res = _uncachedChunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_result.OldPosition, res.LogRecord.LogPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs index c343fe15523..f607b8e1037 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs @@ -67,7 +67,7 @@ public void the_second_record_was_written() { [Test] public void the_first_record_can_be_read_at_position() { - var res = _chunk.TryReadAt((int)_position1, couldBeScavenged: true); + var res = _chunk.TryReadAt((int)_position1, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.IsTrue(res.LogRecord is IPrepareLogRecord); Assert.AreEqual(_prepare1, res.LogRecord); @@ -75,7 +75,7 @@ public void the_first_record_can_be_read_at_position() { [Test] public void the_second_record_can_be_read_at_position() { - var res = _chunk.TryReadAt((int)_position2, couldBeScavenged: true); + var res = _chunk.TryReadAt((int)_position2, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.IsTrue(res.LogRecord is IPrepareLogRecord); Assert.AreEqual(_prepare2, res.LogRecord); diff --git a/src/EventStore.Core/Services/RedactionService.cs b/src/EventStore.Core/Services/RedactionService.cs index ca32f95e313..16fd531c4e9 100644 --- a/src/EventStore.Core/Services/RedactionService.cs +++ b/src/EventStore.Core/Services/RedactionService.cs @@ -8,6 +8,7 @@ using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Synchronization; +using 
EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using Serilog; @@ -70,7 +71,7 @@ private void GetEventPosition(string streamName, long eventNumber, IEnvelope env var logPos = eventInfo.LogPosition; var chunk = _db.Manager.GetChunkFor(logPos); var localPosition = chunk.ChunkHeader.GetLocalLogPosition(logPos); - var chunkEventOffset = chunk.GetActualRawPosition(localPosition); + var chunkEventOffset = chunk.GetActualRawPosition(localPosition, ITransactionFileTracker.NoOp); // all the events returned by ReadEventInfo_KeepDuplicates() must exist in the log // since the log record was read from the chunk to check for hash collisions. diff --git a/src/EventStore.Core/Telemetry/TelemetryService.cs b/src/EventStore.Core/Telemetry/TelemetryService.cs index 4aa34667fda..c0fb4597eb9 100644 --- a/src/EventStore.Core/Telemetry/TelemetryService.cs +++ b/src/EventStore.Core/Telemetry/TelemetryService.cs @@ -9,6 +9,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.TimerService; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.LogRecords; @@ -178,7 +179,7 @@ private static void OnGossipReceived(IEnvelope envelo private void ReadFirstEpoch() { try { var chunk = _manager.GetChunkFor(0); - var result = chunk.TryReadAt(0, false); + var result = chunk.TryReadAt(0, false, ITransactionFileTracker.NoOp); if (!result.Success) return; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 6ef0ed40bb3..ea790730d05 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -253,7 +253,7 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) { 
SetAttributes(_filename, true); CreateReaderStreams(); - //qq come back to whether we want to track this (just reading the header and footer) + // no need to track reading the header/footer (currently we only track Prepares read anyway) var reader = GetReaderWorkItem(ITransactionFileTracker.NoOp); try { _chunkHeader = ReadHeader(reader.Stream); @@ -615,11 +615,11 @@ private static long GetDataPosition(WriterWorkItem workItem) { // (d) raw (byte offset in file, which is actual - header size) // // this method takes (b) and returns (d) - public long GetActualRawPosition(long logicalPosition) { + public long GetActualRawPosition(long logicalPosition, ITransactionFileTracker tracker) { if (logicalPosition < 0) throw new ArgumentOutOfRangeException(nameof(logicalPosition)); - var actualPosition = _readSide.GetActualPosition(logicalPosition, ITransactionFileTracker.NoOp); + var actualPosition = _readSide.GetActualPosition(logicalPosition, tracker); if (actualPosition < 0) return -1; @@ -748,13 +748,13 @@ public void UnCacheFromMemory() { } } - public bool ExistsAt(long logicalPosition) { - return _readSide.ExistsAt(logicalPosition, ITransactionFileTracker.NoOp); + public bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker) { + return _readSide.ExistsAt(logicalPosition, tracker); } - public void OptimizeExistsAt() { + public void OptimizeExistsAt(ITransactionFileTracker tracker) { if (!ChunkHeader.IsScavenged) return; - ((TFChunkReadSideScavenged)_readSide).OptimizeExistsAt(ITransactionFileTracker.NoOp); + ((TFChunkReadSideScavenged)_readSide).OptimizeExistsAt(tracker); } public void DeOptimizeExistsAt() { @@ -762,8 +762,8 @@ public void DeOptimizeExistsAt() { ((TFChunkReadSideScavenged)_readSide).DeOptimizeExistsAt(); } - public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - return _readSide.TryReadAt(logicalPosition, couldBeScavenged, ITransactionFileTracker.NoOp); + public RecordReadResult TryReadAt(long logicalPosition, 
bool couldBeScavenged, ITransactionFileTracker tracker) { + return _readSide.TryReadAt(logicalPosition, couldBeScavenged, tracker); } public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs index 25141f6019a..75c1ace7f20 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs @@ -165,7 +165,7 @@ private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, var chunk = _db.Manager.GetChunkFor(position); try { CountRead(chunk.IsCached); - return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged); + return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged, ITransactionFileTracker.NoOp); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( @@ -188,7 +188,7 @@ private bool ExistsAtInternal(long position, int retries) { CountRead(chunk.IsCached); if (_optimizeReadSideCache) _existsAtOptimizer.Optimize(chunk); - return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position)); + return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position), ITransactionFileTracker.NoOp); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs index 52ac822570b..5d433c63113 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs @@ -34,7 +34,7 @@ public TFChunkReaderExistsAtOptimizer(int maxCached) { if (chunk == null) return false; Log.Debug("Optimizing chunk {chunk} for fast merge...", 
chunk.FileName); - chunk.OptimizeExistsAt(); + chunk.OptimizeExistsAt(ITransactionFileTracker.NoOp); return true; }; From 5519488b786e7891db64cbf5d5a09f89eea14f62 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 11:43:57 +0000 Subject: [PATCH 13/38] more plumbing (TableIndex done) --- ...dex_off_tfile_with_duplicate_events_in_a_stream.cs | 4 ++-- .../Services/Storage/ReadIndexTestScenario.cs | 2 +- .../Services/Storage/RepeatableDbTestScenario.cs | 2 +- .../Services/Storage/SimpleDbTestScenario.cs | 2 +- ...lding_index_for_partially_persisted_transaction.cs | 2 +- .../Scavenging/Helpers/ScavengeTestScenario.cs | 2 +- .../Truncation/TruncateAndReOpenDbScenario.cs | 2 +- .../Scavenge/Infrastructure/Scenario.cs | 2 +- src/EventStore.Core/ClusterVNode.cs | 5 ++++- src/EventStore.Core/Index/TableIndex.cs | 11 ++++++----- .../Services/UserManagement/SystemAccounts.cs | 3 +++ 11 files changed, 22 insertions(+), 15 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs index 9824856409f..07874fea3cd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs @@ -143,7 +143,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; _tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, @@ -192,7 
+192,7 @@ public override async Task TestFixtureSetUp() { _tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs index a57184aecc5..670b3a6ca10 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs @@ -111,7 +111,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = TransformTableIndex(new TableIndex(indexDirectory, LowHasher, HighHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs index e9867ac3242..657da180de3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs @@ -64,7 +64,7 @@ public void CreateDb(params Rec[] records) { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV3, int.MaxValue, 
Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs index 052b701b6b9..affd36ae69f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs @@ -62,7 +62,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV2, MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV2, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs index af4e99b44f0..7a72b1e4841 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs @@ -36,7 +36,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV2, maxSize: MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault, MaxEntriesInMemTable); diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs 
b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs index 7001fb1b3da..4a46474dc8d 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs @@ -65,7 +65,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; var tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200), - tracker => new TFReaderLease(readerPool, tracker), + _ => new TFReaderLease(readerPool, ITransactionFileTracker.NoOp), PTableVersions.IndexV3, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 100, diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs index 50d6d32a65d..60e500745aa 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs @@ -43,7 +43,7 @@ private void ReOpenDb() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), - tracker => new TFReaderLease(readers, tracker), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV3, int.MaxValue, Constants.PTableMaxReaderCountDefault, diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index f086f63c357..8c8805bcfd8 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -254,7 +254,7 @@ private async Task 
RunInternalAsync( highHasher: highHasher, emptyStreamId: logFormat.EmptyStreamId, memTableFactory: () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 200), - tfReaderFactory: tracker => new TFReaderLease(readerPool, tracker), + tfReaderFactory: _ => new TFReaderLease(readerPool, ITransactionFileTracker.NoOp), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: ESConsts.PTableInitialReaderCount, diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 4620fc844d2..ac275007096 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -642,7 +642,10 @@ TFChunkDbConfig CreateDbConfig( logFormat.EmptyStreamId, () => new HashListMemTable(options.IndexBitnessVersion, maxSize: options.Database.MaxMemTableSize * 2), - tracker => new TFReaderLease(readerPool, tracker), + username => { + var tracker = trackers.TransactionFileTrackers.GetOrAdd(username); + return new TFReaderLease(readerPool, tracker); + }, options.IndexBitnessVersion, maxSizeForMemory: options.Database.MaxMemTableSize, maxTablesPerLevel: 2, diff --git a/src/EventStore.Core/Index/TableIndex.cs b/src/EventStore.Core/Index/TableIndex.cs index f3cd614f4a4..f5a9eba8b8e 100644 --- a/src/EventStore.Core/Index/TableIndex.cs +++ b/src/EventStore.Core/Index/TableIndex.cs @@ -17,6 +17,7 @@ using ILogger = Serilog.ILogger; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Index { public abstract class TableIndex { @@ -49,7 +50,7 @@ public long PrepareCheckpoint { private readonly byte _ptableVersion; private readonly string _directory; private readonly Func _memTableFactory; - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly IIndexFilenameProvider _fileNameProvider; private readonly IIndexStatusTracker _statusTracker; @@ -79,7 +80,7 @@ public 
TableIndex(string directory, IHasher highHasher, TStreamId emptyStreamId, Func memTableFactory, - Func tfReaderFactory, + Func tfReaderFactory, byte ptableVersion, int maxAutoMergeIndexLevel, int pTableMaxReaderCount, @@ -310,7 +311,7 @@ private void ReadOffQueue() { Log.Debug("Performing manual index merge."); _isManualMergePending = false; - using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _tfReaderFactory(SystemAccounts.SystemIndexMergeName)) { var manualMergeResult = _indexMap.TryManualMerge( (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), @@ -361,7 +362,7 @@ private void ReadOffQueue() { _indexMap.SaveToFile(indexmapFile); if (addResult.CanMergeAny) { - using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _tfReaderFactory(SystemAccounts.SystemIndexMergeName)) { MergeResult mergeResult; do { mergeResult = _indexMap.TryMergeOneLevel( @@ -464,7 +465,7 @@ private void ScavengeInternal( try { ct.ThrowIfCancellationRequested(); - using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _tfReaderFactory(SystemAccounts.SystemIndexScavengeName)) { var indexmapFile = Path.Combine(_directory, IndexMapFilename); Func existsAt = entry => reader.ExistsAt(entry.Position); diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index bd2b3cb3607..e54fddc41da 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -10,5 +10,8 @@ public class SystemAccounts { }; public static readonly ClaimsPrincipal System = new ClaimsPrincipal(new ClaimsIdentity(Claims, "system")); public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); + + public 
static readonly string SystemIndexMergeName = "system-index-merge"; + public static readonly string SystemIndexScavengeName = "system-index-scavenge"; } } From fd29d2fe360a23aed3391eae23117ae4d17e9f2a Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 12:00:04 +0000 Subject: [PATCH 14/38] more plumbing (RedactionService done) --- .../RedactionService/RedactionServiceTestFixture.cs | 4 +++- src/EventStore.Core/ClusterVNode.cs | 3 ++- src/EventStore.Core/Services/RedactionService.cs | 8 ++++++-- .../Services/UserManagement/SystemAccounts.cs | 1 + 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs b/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs index 77af5849b84..dc8b81f2e69 100644 --- a/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs +++ b/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs @@ -3,6 +3,7 @@ using EventStore.Core.Synchronization; using EventStore.Core.Tests.Bus; using EventStore.Core.Tests.Services.Storage; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.RedactionService { @@ -16,7 +17,8 @@ public RedactionServiceTestFixture() : base(chunkSize: 1024) { } [SetUp] public virtual Task SetUp() { _switchChunksLock = new SemaphoreSlimLock(); - RedactionService = new RedactionService(new FakeQueuedHandler(), Db, ReadIndex, _switchChunksLock); + RedactionService = new RedactionService(new FakeQueuedHandler(), Db, ReadIndex, _switchChunksLock, + ITransactionFileTrackerFactory.NoOp); return Task.CompletedTask; } diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index ac275007096..210f048e9ab 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -1408,7 +1408,8 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { 
_mainBus.Subscribe(redactionQueue.WidenFrom()); _mainBus.Subscribe(redactionQueue.WidenFrom()); - var redactionService = new RedactionService(redactionQueue, Db, _readIndex, _switchChunksLock); + var redactionService = new RedactionService(redactionQueue, Db, _readIndex, _switchChunksLock, + trackers.TransactionFileTrackers); redactionBus.Subscribe(redactionService); redactionBus.Subscribe(redactionService); redactionBus.Subscribe(redactionService); diff --git a/src/EventStore.Core/Services/RedactionService.cs b/src/EventStore.Core/Services/RedactionService.cs index 16fd531c4e9..ec9f4d50e68 100644 --- a/src/EventStore.Core/Services/RedactionService.cs +++ b/src/EventStore.Core/Services/RedactionService.cs @@ -7,6 +7,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.Services.UserManagement; using EventStore.Core.Synchronization; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; @@ -30,6 +31,7 @@ public class RedactionService : private readonly TFChunkDb _db; private readonly IReadIndex _readIndex; private readonly SemaphoreSlimLock _switchChunksLock; + private readonly ITransactionFileTracker _tracker; private const string NewChunkFileExtension = ".tmp"; @@ -37,7 +39,8 @@ public RedactionService( IQueuedHandler queuedHandler, TFChunkDb db, IReadIndex readIndex, - SemaphoreSlimLock switchChunksLock) { + SemaphoreSlimLock switchChunksLock, + ITransactionFileTrackerFactory trackers) { Ensure.NotNull(queuedHandler, nameof(queuedHandler)); Ensure.NotNull(db, nameof(db)); Ensure.NotNull(readIndex, nameof(readIndex)); @@ -47,6 +50,7 @@ public RedactionService( _db = db; _readIndex = readIndex; _switchChunksLock = switchChunksLock; + _tracker = trackers.GetOrAdd(SystemAccounts.SystemRedactionName); } public void Handle(RedactionMessage.GetEventPosition message) { @@ -71,7 +75,7 @@ private void GetEventPosition(string streamName, long eventNumber, 
IEnvelope env var logPos = eventInfo.LogPosition; var chunk = _db.Manager.GetChunkFor(logPos); var localPosition = chunk.ChunkHeader.GetLocalLogPosition(logPos); - var chunkEventOffset = chunk.GetActualRawPosition(localPosition, ITransactionFileTracker.NoOp); + var chunkEventOffset = chunk.GetActualRawPosition(localPosition, _tracker); // all the events returned by ReadEventInfo_KeepDuplicates() must exist in the log // since the log record was read from the chunk to check for hash collisions. diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index e54fddc41da..12c8494899e 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -13,5 +13,6 @@ public class SystemAccounts { public static readonly string SystemIndexMergeName = "system-index-merge"; public static readonly string SystemIndexScavengeName = "system-index-scavenge"; + public static readonly string SystemRedactionName = "system-redaction"; } } From 16005549f439b2f171d019b3b0270ab1906d996c Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 13:24:54 +0000 Subject: [PATCH 15/38] adjust tests --- .../Services/Storage/FakeInMemoryTFReader.cs | 2 -- src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs | 2 -- 2 files changed, 4 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs index 834982d5729..bdd97b5b153 100644 --- a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs +++ b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs @@ -16,11 +16,9 @@ public FakeInMemoryTfReader(int recordOffset){ } public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); } public void OnReturned() { - throw new NotImplementedException(); } public 
void AddRecord(ILogRecord record, long position){ diff --git a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs index 542cd303db1..38f7e90dc9a 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs @@ -215,11 +215,9 @@ public FakeReader(Guid? rootPartitionId, Guid? rootPartitionTypeId, bool without } public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); } public void OnReturned() { - throw new NotImplementedException(); } public void Reposition(long position) { From 642c802fff23596f25128f5184ddc8f598a157f0 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 14:30:46 +0000 Subject: [PATCH 16/38] more plumbing (Index committer done) --- ...ndex_off_tfile_with_duplicate_events_in_a_stream.cs | 2 ++ .../Services/Storage/ReadIndexTestScenario.cs | 1 + .../Services/Storage/RepeatableDbTestScenario.cs | 1 + .../Services/Storage/SimpleDbTestScenario.cs | 1 + ...ilding_index_for_partially_persisted_transaction.cs | 1 + .../Services/Storage/WriteEventsToIndexScenario.cs | 3 ++- .../Scavenging/Helpers/ScavengeTestScenario.cs | 1 + .../Truncation/TruncateAndReOpenDbScenario.cs | 1 + .../Scavenge/Infrastructure/Scenario.cs | 1 + src/EventStore.Core/ClusterVNode.cs | 1 + .../Services/Storage/ReaderIndex/IndexCommitter.cs | 10 +++++++--- .../Services/Storage/ReaderIndex/ReadIndex.cs | 3 ++- .../Services/UserManagement/SystemAccounts.cs | 2 ++ 13 files changed, 23 insertions(+), 5 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs index 07874fea3cd..2faa5b19b38 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs @@ -172,6 +172,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: _db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); @@ -220,6 +221,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: _db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(chaserCheckpoint.Read()); diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs index 670b3a6ca10..546d6439f74 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs @@ -140,6 +140,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(ChaserCheckpoint.Read()); diff --git a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs index 657da180de3..5cb3cd70411 100644 --- a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs @@ -93,6 +93,7 @@ public 
void CreateDb(params Rec[] records) { indexCheckpoint: DbRes.Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read()); diff --git a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs index affd36ae69f..de08494bbed 100644 --- a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs @@ -91,6 +91,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: DbRes.Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read()); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs index 7a72b1e4841..511c830f73e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs @@ -62,6 +62,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(ChaserCheckpoint.Read()); ReadIndex = readIndex; diff --git 
a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs index 749588597a7..5a704c258a2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs @@ -151,7 +151,8 @@ public override async Task TestFixtureSetUp() { _systemStreams, emptyStreamId, _sizer); _indexCommitter = new IndexCommitter(_publisher, _indexBackend, _indexReader, _tableIndex, _logFormat.StreamNameIndexConfirmer, _streamNames, _logFormat.EventTypeIndexConfirmer, _logFormat.EventTypes, - _systemStreams, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterInitializer, new InMemoryCheckpoint(-1), new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), false); + _systemStreams, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterInitializer, new InMemoryCheckpoint(-1), new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), + ITransactionFileTrackerFactory.NoOp, false); WriteEvents(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs index 4a46474dc8d..f0e0785800b 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs @@ -88,6 +88,7 @@ public override async Task TestFixtureSetUp() { _dbResult.Db.Config.ReplicationCheckpoint,_dbResult.Db.Config.IndexCheckpoint, new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), + ITransactionFileTrackerFactory.NoOp, new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(_dbResult.Db.Config.WriterCheckpoint.Read()); ReadIndex = readIndex; diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs 
b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs index 60e500745aa..7827341cc87 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs @@ -71,6 +71,7 @@ private void ReOpenDb() { indexCheckpoint: Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(ChaserCheckpoint.Read()); ReadIndex = readIndex; diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index 8c8805bcfd8..8eea61fef78 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -286,6 +286,7 @@ private async Task RunInternalAsync( indexCheckpoint: dbResult.Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(dbResult.Db.Config.WriterCheckpoint.Read()); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 210f048e9ab..75fa0638ae6 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -683,6 +683,7 @@ TFChunkDbConfig CreateDbConfig( Db.Config.IndexCheckpoint, trackers.IndexStatusTracker, trackers.IndexTracker, + trackers.TransactionFileTrackers, trackers.CacheHitsMissesTracker); _readIndex = readIndex; var writer = new TFChunkWriter(Db); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs index 
4ed8606351b..ea1532ec8b6 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs @@ -13,6 +13,7 @@ using EventStore.Core.TransactionLog.LogRecords; using ILogger = Serilog.ILogger; using EventStore.LogCommon; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services.Storage.ReaderIndex { public interface IIndexCommitter { @@ -48,6 +49,7 @@ public class IndexCommitter : IndexCommitter, IIndexCommitter> commitedPrepares, bool is } private IEnumerable> GetTransactionPrepares(long transactionPos, long commitPos) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(_tfTracker)) { reader.Reposition(transactionPos); // in case all prepares were scavenged, we should not read past Commit LogPosition @@ -487,7 +491,7 @@ private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitL private void CheckDuplicateEvents(TStreamId streamId, CommitLogRecord commit, IList> indexEntries, IList> prepares) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(_tfTracker)) { var entries = _tableIndex.GetRange(streamId, indexEntries[0].Version, indexEntries[indexEntries.Count - 1].Version); foreach (var indexEntry in entries) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index 22ef4361d6d..dbae219b700 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -59,6 +59,7 @@ public ReadIndex(IPublisher bus, ICheckpoint indexCheckpoint, IIndexStatusTracker indexStatusTracker, IIndexTracker indexTracker, + ITransactionFileTrackerFactory tfTrackers, ICacheHitsMissesTracker cacheTracker) { Ensure.NotNull(bus, "bus"); @@ -92,7 
+93,7 @@ public ReadIndex(IPublisher bus, _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, sizer); _indexCommitter = new IndexCommitter(bus, indexBackend, _indexReader, tableIndex, streamNameIndex, _streamNames, eventTypeIndex, eventTypeNames, systemStreams, streamExistenceFilter, - streamExistenceFilterInitializer, indexCheckpoint, indexStatusTracker, indexTracker, additionalCommitChecks); + streamExistenceFilterInitializer, indexCheckpoint, indexStatusTracker, indexTracker, tfTrackers, additionalCommitChecks); _allReader = new AllReader(indexBackend, _indexCommitter, _streamNames, eventTypeNames); RegisterHitsMisses(cacheTracker); diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 12c8494899e..a804f83f8da 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -11,8 +11,10 @@ public class SystemAccounts { public static readonly ClaimsPrincipal System = new ClaimsPrincipal(new ClaimsIdentity(Claims, "system")); public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); + //qq consider granularity public static readonly string SystemIndexMergeName = "system-index-merge"; public static readonly string SystemIndexScavengeName = "system-index-scavenge"; public static readonly string SystemRedactionName = "system-redaction"; + public static readonly string SystemIndexCommitterName = "system-index-committer"; } } From e7ad9d6703544b28cddff7b077c2d162412ed028 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 14:35:49 +0000 Subject: [PATCH 17/38] more plumbing (LogV2StreamExistenceFilterInitializer done) --- src/EventStore.Core/ClusterVNode.cs | 5 ++++- src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs | 2 +- 
.../LogV2/LogV2StreamExistenceFilterInitializer.cs | 7 ++++--- .../Services/UserManagement/SystemAccounts.cs | 1 + 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 75fa0638ae6..c9797ac0ae3 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -596,7 +596,10 @@ TFChunkDbConfig CreateDbConfig( MaxReaderCount = pTableMaxReaderCount, StreamExistenceFilterSize = options.Database.StreamExistenceFilterSize, StreamExistenceFilterCheckpoint = Db.Config.StreamExistenceFilterCheckpoint, - TFReaderLeaseFactory = tracker => new TFReaderLease(readerPool, tracker) + TFReaderLeaseFactory = username => { + var tracker = trackers.TransactionFileTrackers.GetOrAdd(username); + return new TFReaderLease(readerPool, tracker); + } }); ICacheResizer streamInfoCacheResizer; diff --git a/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs b/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs index 9bb69bd7739..180eebd3b33 100644 --- a/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs +++ b/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs @@ -21,7 +21,7 @@ public record LogFormatAbstractorOptions { public ICheckpoint StreamExistenceFilterCheckpoint { get; init; } public TimeSpan StreamExistenceFilterCheckpointInterval { get; init; } = TimeSpan.FromSeconds(30); public TimeSpan StreamExistenceFilterCheckpointDelay { get; init; } = TimeSpan.FromSeconds(5); - public Func TFReaderLeaseFactory { get; init; } + public Func TFReaderLeaseFactory { get; init; } } public interface ILogFormatAbstractorFactory { diff --git a/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs b/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs index 527e5ae33db..8282f9bead1 100644 --- a/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs +++ b/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs @@ 
-5,6 +5,7 @@ using EventStore.Core.Exceptions; using EventStore.Core.Index; using EventStore.Core.LogAbstraction; +using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; @@ -21,13 +22,13 @@ namespace EventStore.Core.LogV2 { /// of the previous record, which is fine. the net effect is an extra record is initialized /// on startup next time. public class LogV2StreamExistenceFilterInitializer : INameExistenceFilterInitializer { - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly ITableIndex _tableIndex; protected static readonly ILogger Log = Serilog.Log.ForContext(); public LogV2StreamExistenceFilterInitializer( - Func tfReaderFactory, + Func tfReaderFactory, ITableIndex tableIndex) { Ensure.NotNull(tableIndex, nameof(tableIndex)); @@ -134,7 +135,7 @@ private void InitializeFromLog(INameExistenceFilter filter) { // whether the checkpoint is the pre or post position of the last processed record. var startPosition = filter.CurrentCheckpoint == -1 ? 
0 : filter.CurrentCheckpoint; Log.Information("Initializing from log starting at {startPosition:N0}", startPosition); - using var reader = _tfReaderFactory(ITransactionFileTracker.NoOp); //qq + using var reader = _tfReaderFactory(SystemAccounts.SystemName); reader.Reposition(startPosition); while (TryReadNextLogRecord(reader, out var result)) { diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index a804f83f8da..5bb31b4f2a5 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -12,6 +12,7 @@ public class SystemAccounts { public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); //qq consider granularity + public static readonly string SystemName = "system"; public static readonly string SystemIndexMergeName = "system-index-merge"; public static readonly string SystemIndexScavengeName = "system-index-scavenge"; public static readonly string SystemRedactionName = "system-redaction"; From 262b6a0fb81f106321f31944d1f3b012e4e63741 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 14:38:44 +0000 Subject: [PATCH 18/38] more plumbing (IndexReader done) --- .../IIndexReaderExtensions.cs | 7 +- .../HashCollisions/with_hash_collisions.cs | 10 +- .../Storage/ReadIndex/FakeIndexReader.cs | 29 ++--- .../LogFormatAbstractorV3Tests.cs | 28 ++--- .../EventTypeIdToNameFromStandardIndex.cs | 5 +- .../LogV3/StreamIdToNameFromStandardIndex.cs | 5 +- .../Storage/ReaderIndex/IndexCommitter.cs | 11 +- .../Storage/ReaderIndex/IndexReader.cs | 101 ++++++++++-------- .../Storage/ReaderIndex/IndexWriter.cs | 16 +-- .../Services/Storage/ReaderIndex/ReadIndex.cs | 28 ++--- .../TransactionLog/Chunks/TFChunkReader.cs | 4 +- 11 files changed, 128 insertions(+), 116 deletions(-) diff --git 
a/src/EventStore.Core.Tests/IIndexReaderExtensions.cs b/src/EventStore.Core.Tests/IIndexReaderExtensions.cs index 08bc0b89a73..ce6bead5c44 100644 --- a/src/EventStore.Core.Tests/IIndexReaderExtensions.cs +++ b/src/EventStore.Core.Tests/IIndexReaderExtensions.cs @@ -1,14 +1,15 @@ using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests { public static class IIndexReaderExtensions { public static IndexReadEventResult ReadEvent(this IIndexReader index, string streamName, long eventNumber) => - index.ReadEvent(streamName, streamName, eventNumber); + index.ReadEvent(streamName, streamName, eventNumber, ITransactionFileTracker.NoOp); public static IndexReadStreamResult ReadStreamEventsBackward(this IIndexReader index, string streamName, long fromEventNumber, int maxCount) => - index.ReadStreamEventsBackward(streamName, streamName, fromEventNumber, maxCount); + index.ReadStreamEventsBackward(streamName, streamName, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); public static IndexReadStreamResult ReadStreamEventsForward(this IIndexReader index, string streamName, long fromEventNumber, int maxCount) => - index.ReadStreamEventsForward(streamName, streamName, fromEventNumber, maxCount); + index.ReadStreamEventsForward(streamName, streamName, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs index 33d578e8b32..e53081b4d5c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs @@ -101,7 +101,7 @@ protected override void when() { [Test] public void should_return_no_stream() { - Assert.AreEqual(ExpectedVersion.NoStream, _indexReader.GetStreamLastEventNumber("account--696193173")); + 
Assert.AreEqual(ExpectedVersion.NoStream, _indexReader.GetStreamLastEventNumber("account--696193173", ITransactionFileTracker.NoOp)); } } @@ -138,7 +138,7 @@ protected override void when() { [Test] public void should_return_invalid_event_number() { Assert.AreEqual(EventStore.Core.Data.EventNumber.Invalid, - _indexReader.GetStreamLastEventNumber(stream1Id)); + _indexReader.GetStreamLastEventNumber(stream1Id, ITransactionFileTracker.NoOp)); } } @@ -174,7 +174,7 @@ protected override void when() { [Test] public void should_return_last_event_number() { - Assert.AreEqual(0, _indexReader.GetStreamLastEventNumber(stream1Id)); + Assert.AreEqual(0, _indexReader.GetStreamLastEventNumber(stream1Id, ITransactionFileTracker.NoOp)); } } @@ -209,7 +209,7 @@ protected override void when() { [Test] public void should_return_invalid_event_number() { Assert.AreEqual(EventStore.Core.Data.EventNumber.Invalid, - _indexReader.GetStreamLastEventNumber("account--696193173")); + _indexReader.GetStreamLastEventNumber("account--696193173", ITransactionFileTracker.NoOp)); } } @@ -330,7 +330,7 @@ protected override void when() { [Test] public void should_return_the_correct_last_event_number() { - var result = _indexReader.GetStreamLastEventNumber(streamId); + var result = _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result); } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/FakeIndexReader.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/FakeIndexReader.cs index c0e6cac477d..72b1790bee6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/FakeIndexReader.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/FakeIndexReader.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.Messages; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; namespace 
EventStore.Core.Tests.Services.Storage.ReadIndex; @@ -11,30 +12,30 @@ class FakeIndexReader : IIndexReader { public long NotCachedStreamInfo { get; } public long HashCollisions { get; } - public IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber) { + public IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadStreamResult - ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { + ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, - int maxCount) { + int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -44,36 +45,36 @@ public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + long 
beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, - int maxCount, long beforePosition) { + int maxCount, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IPrepareLogRecord ReadPrepare(TStreamId streamId, long eventNumber) { + public IPrepareLogRecord ReadPrepare(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StreamMetadata GetStreamMetadata(TStreamId streamId) { + public StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber(TStreamId streamId) { + public long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } } diff --git a/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs b/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs index 2e8075b6985..839d7ad9890 100644 --- 
a/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs @@ -317,7 +317,7 @@ class MockIndexReader : IIndexReader { public int StreamCount => _index[LogV3SystemStreams.StreamsCreatedStreamNumber].Count; public int EventTypeCount => _index[LogV3SystemStreams.EventTypesStreamNumber].Count; - public IPrepareLogRecord ReadPrepare(StreamId streamId, long eventNumber) { + public IPrepareLogRecord ReadPrepare(StreamId streamId, long eventNumber, ITransactionFileTracker tracker) { // simulates what would be in the index. return _index[streamId][eventNumber]; } @@ -328,50 +328,50 @@ public IPrepareLogRecord ReadPrepare(StreamId streamId, long eventNumb public long HashCollisions => throw new NotImplementedException(); - public StorageMessage.EffectiveAcl GetEffectiveAcl(StreamId streamId) => + public StorageMessage.EffectiveAcl GetEffectiveAcl(StreamId streamId, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(uint streamId, long eventNumber) { + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(uint streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StreamId GetEventStreamIdByTransactionId(long transactionId) => + public StreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public long GetStreamLastEventNumber(StreamId streamId) { + public long GetStreamLastEventNumber(StreamId streamId, ITransactionFileTracker tracker) { if (streamId == LogV3SystemStreams.StreamsCreatedStreamNumber) return _index[streamId].Count - 1; throw new NotImplementedException(); } - public StreamMetadata GetStreamMetadata(StreamId streamId) => + public StreamMetadata GetStreamMetadata(StreamId streamId, ITransactionFileTracker tracker) => throw new 
NotImplementedException(); - public IndexReadEventResult ReadEvent(string streamName, StreamId streamId, long eventNumber) => + public IndexReadEventResult ReadEvent(string streamName, StreamId streamId, long eventNumber, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadStreamResult ReadStreamEventsBackward(string streamName, StreamId streamId, long fromEventNumber, int maxCount) => + public IndexReadStreamResult ReadStreamEventsBackward(string streamName, StreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadStreamResult ReadStreamEventsForward(string streamName, StreamId streamId, long fromEventNumber, int maxCount) => + public IndexReadStreamResult ReadStreamEventsForward(string streamName, StreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition) => + public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition) => + public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int 
maxCount, long beforePosition) => + public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public long GetStreamLastEventNumber_KnownCollisions(uint streamId, long beforePosition) => + public long GetStreamLastEventNumber_KnownCollisions(uint streamId, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) => + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); } } diff --git a/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs b/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs index be436e67b34..8364ae716ba 100644 --- a/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs +++ b/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.LogAbstraction; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.LogV3 { @@ -15,7 +16,7 @@ public EventTypeIdToNameFromStandardIndex(IIndexReader indexReader) { public bool TryGetName(uint eventTypeId, out string name) { var record = _indexReader.ReadPrepare( streamId: LogV3SystemStreams.EventTypesStreamNumber, - eventNumber: EventTypeIdConverter.ToEventNumber(eventTypeId)); + eventNumber: EventTypeIdConverter.ToEventNumber(eventTypeId), tracker: ITransactionFileTracker.NoOp); if (record is null) { name = null; @@ -30,7 +31,7 @@ public bool TryGetName(uint eventTypeId, out string name) { } public bool TryGetLastValue(out uint lastValue) { - var lastEventNumber = 
_indexReader.GetStreamLastEventNumber(LogV3SystemStreams.EventTypesStreamNumber); + var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.EventTypesStreamNumber, ITransactionFileTracker.NoOp); var success = ExpectedVersion.NoStream < lastEventNumber && lastEventNumber != EventNumber.DeletedStream; lastValue = EventTypeIdConverter.ToEventTypeId(lastEventNumber); return success; diff --git a/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs b/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs index aab4f6cdfed..52bd4c90530 100644 --- a/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs +++ b/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.LogAbstraction; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using StreamId = System.UInt32; @@ -21,7 +22,7 @@ public bool TryGetName(StreamId streamId, out string name) { // explicitly create metastreams. 
var record = _indexReader.ReadPrepare( streamId: LogV3SystemStreams.StreamsCreatedStreamNumber, - eventNumber: StreamIdConverter.ToEventNumber(streamId)); + eventNumber: StreamIdConverter.ToEventNumber(streamId), tracker: ITransactionFileTracker.NoOp); if (record is null) { name = null; @@ -36,7 +37,7 @@ public bool TryGetName(StreamId streamId, out string name) { } public bool TryGetLastValue(out StreamId lastValue) { - var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.StreamsCreatedStreamNumber); + var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.StreamsCreatedStreamNumber, ITransactionFileTracker.NoOp); var success = ExpectedVersion.NoStream < lastEventNumber && lastEventNumber != EventNumber.DeletedStream; lastValue = StreamIdConverter.ToStreamId(lastEventNumber); return success; diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs index ea1532ec8b6..13002475cd0 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs @@ -292,7 +292,7 @@ public long Commit(CommitLogRecord commit, bool isTfEof, bool cacheLastEventNumb if (indexEntries.Count > 0) { if (_additionalCommitChecks && cacheLastEventNumber) { - CheckStreamVersion(streamId, indexEntries[0].Version, commit); + CheckStreamVersion(streamId, indexEntries[0].Version, commit, _tfTracker); CheckDuplicateEvents(streamId, commit, indexEntries, prepares); } @@ -402,7 +402,7 @@ public long Commit(IList> commitedPrepares, bool is if (indexEntries.Count > 0) { if (_additionalCommitChecks && cacheLastEventNumber) { - CheckStreamVersion(streamId, indexEntries[0].Version, null); // TODO AN: bad passing null commit + CheckStreamVersion(streamId, indexEntries[0].Version, null, _tfTracker); // TODO AN: bad passing null commit CheckDuplicateEvents(streamId, null, indexEntries, 
prepares); // TODO AN: bad passing null commit } @@ -473,11 +473,12 @@ private IEnumerable> GetTransactionPrepares(long tr } } - private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitLogRecord commit) { + private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitLogRecord commit, + ITransactionFileTracker tracker) { if (newEventNumber == EventNumber.DeletedStream) return; - long lastEventNumber = _indexReader.GetStreamLastEventNumber(streamId); + long lastEventNumber = _indexReader.GetStreamLastEventNumber(streamId, tracker); if (newEventNumber != lastEventNumber + 1) { if (Debugger.IsAttached) Debugger.Break(); @@ -512,7 +513,7 @@ private void CheckDuplicateEvents(TStreamId streamId, CommitLogRecord commit, IL } private SystemSettings GetSystemSettings() { - var res = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, _systemStreams.SettingsStream, -1); + var res = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, _systemStreams.SettingsStream, -1, _tfTracker); return res.Result == ReadEventResult.Success ? DeserializeSystemSettings(res.Record.Data) : null; } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs index 8b9689c7cb9..58ab19f29b2 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs @@ -21,27 +21,27 @@ public interface IIndexReader { // streamId drives the read, streamName is only for populating on the result. 
// this was less messy than safely adding the streamName to the EventRecord at some point after construction - IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber); - IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); - IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); - StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId); - IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber); - IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); + StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, 
long beforePosition); + IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); /// /// Doesn't filter $maxAge, $maxCount, $tb(truncate before), doesn't check stream deletion, etc. /// - IPrepareLogRecord ReadPrepare(TStreamId streamId, long eventNumber); + IPrepareLogRecord ReadPrepare(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); - TStreamId GetEventStreamIdByTransactionId(long transactionId); + TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker); - StreamMetadata GetStreamMetadata(TStreamId streamId); - long GetStreamLastEventNumber(TStreamId streamId); - long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition); - long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition); + StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker); } public abstract class IndexReader { @@ -105,11 +105,12 @@ public IndexReader( _skipIndexScanOnRead = skipIndexScanOnRead; } - IndexReadEventResult IIndexReader.ReadEvent(string streamName, TStreamId streamId, long eventNumber) { + IndexReadEventResult IIndexReader.ReadEvent(string streamName, TStreamId streamId, long eventNumber, + ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); if (eventNumber < -1) throw new 
ArgumentOutOfRangeException("eventNumber"); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { return ReadEventInternal(reader, streamName, streamId, eventNumber); } } @@ -154,8 +155,9 @@ private IndexReadEventResult ReadEventInternal(TFReaderLease reader, string stre originalStreamExists: originalStreamExists); } - IPrepareLogRecord IIndexReader.ReadPrepare(TStreamId streamId, long eventNumber) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + IPrepareLogRecord IIndexReader.ReadPrepare(TStreamId streamId, long eventNumber, + ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { return ReadPrepareInternal(reader, streamId, eventNumber); } } @@ -216,17 +218,18 @@ protected static IPrepareLogRecord ReadPrepareInternal(TFReaderLease } IndexReadStreamResult IIndexReader. - ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return ReadStreamEventsForwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead); + ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return ReadStreamEventsForwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead, tracker); } private IndexReadStreamResult ReadStreamEventsForwardInternal(string streamName, TStreamId streamId, long fromEventNumber, - int maxCount, bool skipIndexScanOnRead) { + int maxCount, bool skipIndexScanOnRead, + ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); Ensure.Nonnegative(fromEventNumber, "fromEventNumber"); Ensure.Positive(maxCount, "maxCount"); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { var lastEventNumber = GetStreamLastEventNumberCached(reader, 
streamId); var metadata = GetStreamMetadataCached(reader, streamId); if (lastEventNumber == EventNumber.DeletedStream) @@ -502,8 +505,8 @@ delegate IEnumerable ReadIndexEntries( (indexReader, streamHandle, reader, startEventNumber, endEventNumber) => indexReader._tableIndex.GetRange(streamHandle, startEventNumber, endEventNumber); - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { var result = ReadEventInfoForwardInternal( streamId, reader, @@ -521,8 +524,9 @@ public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, } // note for simplicity skipIndexScanOnRead is always treated as false. see ReadEventInfoInternal - public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, + ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { return ReadEventInfoForwardInternal( streamId, reader, @@ -593,16 +597,17 @@ private IndexReadEventInfoResult ReadEventInfoForwardInternal( } IndexReadStreamResult IIndexReader. 
- ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return ReadStreamEventsBackwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead); + ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return ReadStreamEventsBackwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead, tracker); } private IndexReadStreamResult ReadStreamEventsBackwardInternal(string streamName, TStreamId streamId, long fromEventNumber, - int maxCount, bool skipIndexScanOnRead) { + int maxCount, bool skipIndexScanOnRead, + ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); Ensure.Positive(maxCount, "maxCount"); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { var lastEventNumber = GetStreamLastEventNumberCached(reader, streamId); var metadata = GetStreamMetadataCached(reader, streamId); if (lastEventNumber == EventNumber.DeletedStream) @@ -661,14 +666,14 @@ private IndexReadStreamResult ReadStreamEventsBackwardInternal(string streamName } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, ITransactionFileTracker tracker) { if (fromEventNumber < 0) - fromEventNumber = GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition); + fromEventNumber = GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, tracker); if (fromEventNumber == ExpectedVersion.NoStream) return new IndexReadEventInfoResult(new EventInfo[] { }, -1); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { return ReadEventInfoBackwardInternal( streamId, reader, @@ -693,10 +698,11 @@ public 
IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions( Func getStreamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, + ITransactionFileTracker tracker) { if (fromEventNumber < 0) - fromEventNumber = GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition); + fromEventNumber = GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, tracker); if (fromEventNumber == ExpectedVersion.NoStream) return new IndexReadEventInfoResult(new EventInfo[] { }, -1); @@ -800,16 +806,16 @@ private EventInfo[] ReadEventInfoInternal( return result; } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { Ensure.Nonnegative(transactionId, "transactionId"); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { var res = ReadPrepareInternal(reader, transactionId); return res == null ? default : res.EventStreamId; } } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { var sysSettings = _backend.GetSystemSettings() ?? 
SystemSettings.Default; StreamAcl acl; StreamAcl sysAcl; @@ -828,16 +834,16 @@ public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { } } - long IIndexReader.GetStreamLastEventNumber(TStreamId streamId) { + long IIndexReader.GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamLastEventNumberCached(reader, streamId); } } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, reader); } } @@ -858,8 +864,9 @@ bool IsForThisStream(IndexEntry indexEntry) { // gets the last event number before beforePosition for the given stream hash. can assume that // the hash does not collide with anything before beforePosition. 
- public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, + ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, reader); } } @@ -892,9 +899,9 @@ bool IsForThisStream(IndexEntry indexEntry) { return entry.Version; } - StreamMetadata IIndexReader.GetStreamMetadata(TStreamId streamId) { + StreamMetadata IIndexReader.GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamMetadataCached(reader, streamId); } } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs index 8bc1833304d..394e2065c30 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs @@ -213,11 +213,11 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte if(first) /*no data in transaction*/ return new CommitCheckResult(CommitDecision.Ok, streamId, curVersion, -1, -1, IsSoftDeleted(streamId)); else{ - var isReplicated = _indexReader.GetStreamLastEventNumber(streamId) >= endEventNumber; + var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) >= endEventNumber; //TODO(clc): the new index should hold the log positions removing this read //n.b. 
the index will never have the event in the case of NotReady as it only committed records are indexed //in that case the position will need to come from the pre-index - var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, endEventNumber); + var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, endEventNumber, ITransactionFileTracker.NoOp); var logPos = idempotentEvent.Result == ReadEventResult.Success ? idempotentEvent.Record.LogPosition : -1; if(isReplicated) @@ -238,7 +238,7 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte && prepInfo.EventNumber == eventNumber) continue; - var res = _indexReader.ReadPrepare(streamId, eventNumber); + var res = _indexReader.ReadPrepare(streamId, eventNumber, ITransactionFileTracker.NoOp); if (res != null && res.EventId == eventId) continue; @@ -257,11 +257,11 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte if(eventNumber == expectedVersion) /* no data in transaction */ return new CommitCheckResult(CommitDecision.WrongExpectedVersion, streamId, curVersion, -1, -1, false); else{ - var isReplicated = _indexReader.GetStreamLastEventNumber(streamId) >= eventNumber; + var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) >= eventNumber; //TODO(clc): the new index should hold the log positions removing this read //n.b. the index will never have the event in the case of NotReady as it only committed records are indexed //in that case the position will need to come from the pre-index - var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, eventNumber); + var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, eventNumber, ITransactionFileTracker.NoOp); var logPos = idempotentEvent.Result == ReadEventResult.Success ? 
idempotentEvent.Record.LogPosition : -1; if(isReplicated) @@ -459,7 +459,7 @@ public long GetStreamLastEventNumber(TStreamId streamId) { long lastEventNumber; if (_streamVersions.TryGet(streamId, out lastEventNumber)) return lastEventNumber; - return _indexReader.GetStreamLastEventNumber(streamId); + return _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); } public StreamMetadata GetStreamMetadata(TStreamId streamId) { @@ -472,7 +472,7 @@ public StreamMetadata GetStreamMetadata(TStreamId streamId) { return m; } - return _indexReader.GetStreamMetadata(streamId); + return _indexReader.GetStreamMetadata(streamId, ITransactionFileTracker.NoOp); } public RawMetaInfo GetStreamRawMeta(TStreamId streamId) { @@ -481,7 +481,7 @@ public RawMetaInfo GetStreamRawMeta(TStreamId streamId) { StreamMeta meta; if (!_streamRawMetas.TryGet(streamId, out meta)) - meta = new StreamMeta(_indexReader.ReadPrepare(metastreamId, metaLastEventNumber).Data, null); + meta = new StreamMeta(_indexReader.ReadPrepare(metastreamId, metaLastEventNumber, ITransactionFileTracker.NoOp).Data, null); return new RawMetaInfo(metaLastEventNumber, meta.RawMeta); } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index dbae219b700..c23c5d06b51 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -100,15 +100,15 @@ public ReadIndex(IPublisher bus, } IndexReadEventResult IReadIndex.ReadEvent(string streamName, TStreamId streamId, long eventNumber) { - return _indexReader.ReadEvent(streamName, streamId, eventNumber); + return _indexReader.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); } IndexReadStreamResult IReadIndex.ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return _indexReader.ReadStreamEventsForward(streamName, 
streamId, fromEventNumber, maxCount); + return _indexReader.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); } IndexReadStreamResult IReadIndex.ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return _indexReader.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount); + return _indexReader.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); } TStreamId IReadIndex.GetStreamId(string streamName) { @@ -116,11 +116,11 @@ TStreamId IReadIndex.GetStreamId(string streamName) { } public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { - return _indexReader.ReadEventInfo_KeepDuplicates(streamId, eventNumber); + return _indexReader.ReadEventInfo_KeepDuplicates(streamId, eventNumber, ITransactionFileTracker.NoOp); } public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoForward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition); + return _indexReader.ReadEventInfoForward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, ITransactionFileTracker.NoOp); } public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition) { @@ -129,12 +129,12 @@ public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoBackward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition); + return _indexReader.ReadEventInfoBackward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, ITransactionFileTracker.NoOp); } 
public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoBackward_NoCollisions(stream, getStreamId, fromEventNumber, maxCount, beforePosition); + return _indexReader.ReadEventInfoBackward_NoCollisions(stream, getStreamId, fromEventNumber, maxCount, beforePosition, ITransactionFileTracker.NoOp); } string IReadIndex.GetStreamName(TStreamId streamId) { @@ -142,27 +142,27 @@ string IReadIndex.GetStreamName(TStreamId streamId) { } bool IReadIndex.IsStreamDeleted(TStreamId streamId) { - return _indexReader.GetStreamLastEventNumber(streamId) == EventNumber.DeletedStream; + return _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) == EventNumber.DeletedStream; } long IReadIndex.GetStreamLastEventNumber(TStreamId streamId) { - return _indexReader.GetStreamLastEventNumber(streamId); + return _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); } public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { - return _indexReader.GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition); + return _indexReader.GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, ITransactionFileTracker.NoOp); } public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { - return _indexReader.GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition); + return _indexReader.GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, ITransactionFileTracker.NoOp); } StreamMetadata IReadIndex.GetStreamMetadata(TStreamId streamId) { - return _indexReader.GetStreamMetadata(streamId); + return _indexReader.GetStreamMetadata(streamId, ITransactionFileTracker.NoOp); } public TStreamId GetEventStreamIdByTransactionId(long transactionId) { - return 
_indexReader.GetEventStreamIdByTransactionId(transactionId); + return _indexReader.GetEventStreamIdByTransactionId(transactionId, ITransactionFileTracker.NoOp); } IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount, @@ -188,7 +188,7 @@ IndexReadAllResult IReadIndex.ReadAllEventsBackward(TFPos pos, int maxCount, } public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { - return _indexReader.GetEffectiveAcl(streamId); + return _indexReader.GetEffectiveAcl(streamId, ITransactionFileTracker.NoOp); } void RegisterHitsMisses(ICacheHitsMissesTracker tracker) { diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs index 75c1ace7f20..1d1560337df 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs @@ -165,7 +165,7 @@ private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, var chunk = _db.Manager.GetChunkFor(position); try { CountRead(chunk.IsCached); - return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged, ITransactionFileTracker.NoOp); + return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged, _tracker); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( @@ -188,7 +188,7 @@ private bool ExistsAtInternal(long position, int retries) { CountRead(chunk.IsCached); if (_optimizeReadSideCache) _existsAtOptimizer.Optimize(chunk); - return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position), ITransactionFileTracker.NoOp); + return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position), _tracker); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( From c407241ae1125f2010e7dbeba74ab7697553dfc0 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 15:29:01 +0000 
Subject: [PATCH 19/38] more plumbing (ReadIndex done) --- .../ReadIndexExtensions.cs | 13 +++-- .../GetStreamLastEventNumber_NoCollisions.cs | 19 ++++--- ...dex_result_original_stream_exists_tests.cs | 17 +++--- ...etStreamLastEventNumber_KnownCollisions.cs | 27 ++++----- ...tEventNumber_KnownCollisions_Randomized.cs | 5 +- ...LastEventNumber_NoCollisions_Randomized.cs | 3 +- .../ReadEventInfoBackward_KnownCollisions.cs | 37 ++++++------ ...InfoBackward_KnownCollisions_Randomized.cs | 11 ++-- .../ReadEventInfoBackward_NoCollisions.cs | 35 ++++++------ ...entInfoBackward_NoCollisions_Randomized.cs | 11 ++-- .../ReadEventInfoForward_KnownCollisions.cs | 39 ++++++------- ...tInfoForward_KnownCollisions_Randomized.cs | 5 +- .../ReadIndex/ReadEventInfo_KeepDuplicates.cs | 17 +++--- .../TransactionLog/FakeReadIndex.cs | 30 +++++----- .../Scavenge/Infrastructure/Scenario.cs | 6 +- .../EventTypeIdToNameFromStandardIndex.cs | 4 +- .../LogV3/StreamIdToNameFromStandardIndex.cs | 4 +- .../PersistentSubscriptionService.cs | 5 +- .../Services/RedactionService.cs | 2 +- .../Storage/ReaderIndex/IReadIndex.cs | 28 +++++----- .../Services/Storage/ReaderIndex/ReadIndex.cs | 56 +++++++++---------- .../Services/Storage/StorageReaderWorker.cs | 24 ++++---- .../Services/SubscriptionsService.cs | 7 ++- .../TransactionLog/Chunks/TFChunkScavenger.cs | 10 ++-- .../DbAccess/IndexReaderForAccumulator.cs | 6 +- .../DbAccess/IndexReaderForCalculator.cs | 6 +- 26 files changed, 221 insertions(+), 206 deletions(-) diff --git a/src/EventStore.Core.Tests/ReadIndexExtensions.cs b/src/EventStore.Core.Tests/ReadIndexExtensions.cs index 4ac6bd96823..68e8b90bc37 100644 --- a/src/EventStore.Core.Tests/ReadIndexExtensions.cs +++ b/src/EventStore.Core.Tests/ReadIndexExtensions.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests { // Extensions to perform 
streamlookups inline @@ -10,32 +11,32 @@ namespace EventStore.Core.Tests { public static class IReadIndexExtensions { public static bool IsStreamDeleted(this IReadIndex self, string streamName) { var streamId = self.GetStreamId(streamName); - return self.IsStreamDeleted(streamId); + return self.IsStreamDeleted(streamId, ITransactionFileTracker.NoOp); } public static long GetStreamLastEventNumber(this IReadIndex self, string streamName) { var streamId = self.GetStreamId(streamName); - return self.GetStreamLastEventNumber(streamId); + return self.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); } public static IndexReadEventResult ReadEvent(this IReadIndex self, string streamName, long eventNumber) { var streamId = self.GetStreamId(streamName); - return self.ReadEvent(streamName, streamId, eventNumber); + return self.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); } public static IndexReadStreamResult ReadStreamEventsForward(this IReadIndex self, string streamName, long fromEventNumber, int maxCount) { var streamId = self.GetStreamId(streamName); - return self.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount); + return self.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); } public static IndexReadStreamResult ReadStreamEventsBackward(this IReadIndex self, string streamName, long fromEventNumber, int maxCount) { var streamId = self.GetStreamId(streamName); - return self.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount); + return self.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); } public static StreamMetadata GetStreamMetadata(this IReadIndex self, string streamName) { var streamId = self.GetStreamId(streamName); - return self.GetStreamMetadata(streamId); + return self.GetStreamMetadata(streamId, ITransactionFileTracker.NoOp); } public static List EventRecords(this 
IndexReadAllResult result) { diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs index cf973c48113..f272c45197f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.HashCollisions { @@ -35,7 +36,7 @@ public void with_no_events() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } @@ -50,7 +51,7 @@ public void with_one_event() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } @@ -79,7 +80,7 @@ public void with_multiple_events() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } [Test] @@ -88,31 +89,31 @@ public void with_multiple_events_and_before_position() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _third.LogPosition + 1)); + _third.LogPosition + 1, ITransactionFileTracker.NoOp)); Assert.AreEqual(2, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _third.LogPosition)); + _third.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(1, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _second.LogPosition)); + _second.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(0, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _first.LogPosition)); + _first.LogPosition, 
ITransactionFileTracker.NoOp)); Assert.AreEqual(ExpectedVersion.NoStream, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _zeroth.LogPosition)); + _zeroth.LogPosition, ITransactionFileTracker.NoOp)); } } @@ -131,7 +132,7 @@ public void with_deleted_stream() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs b/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs index c4571ab0c1c..67c2976f08f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs @@ -2,6 +2,7 @@ using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -25,7 +26,7 @@ protected override DbResult CreateDb(TFChunkDbCreationHelper= @event.EventNumber should be filtered out result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, - @event.EventNumber, int.MaxValue, @event.LogPosition); + @event.EventNumber, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, - @event.EventNumber + 1, int.MaxValue, @event.LogPosition); + @event.EventNumber + 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); } - result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, -1, 
int.MaxValue, @event.LogPosition); + result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, -1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -99,7 +100,7 @@ public void returns_correct_events_with_max_count() { Assert.GreaterOrEqual(fromEventNumber, 0); var result = ReadIndex.ReadEventInfoBackward_KnownCollisions( - Stream, fromEventNumber, maxCount, long.MaxValue); + Stream, fromEventNumber, maxCount, long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(curEvents.Skip(curEvents.Count - maxCount).ToArray(), result); if (fromEventNumber - maxCount < 0) diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs index 4b397a0866f..57c037072d6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -48,7 +49,7 @@ public void with_no_events() { GetStreamId, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -58,7 +59,7 @@ public void with_no_events() { GetStreamId, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -79,7 +80,7 @@ public void with_one_event() { GetStreamId, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, 
result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -90,7 +91,7 @@ public void with_one_event() { GetStreamId, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new[] { _event }, result); Assert.True(result.IsEndOfStream); @@ -100,7 +101,7 @@ public void with_one_event() { GetStreamId, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -135,7 +136,7 @@ public void with_multiple_events() { GetStreamId, fromEventNumber, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -150,7 +151,7 @@ public void with_multiple_events_and_max_count() { GetStreamId, fromEventNumber, 2, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).Skip(fromEventNumber + 1 - 2).ToArray(), result); if (fromEventNumber - 2 < 0) @@ -168,7 +169,7 @@ public void with_multiple_events_and_before_position() { GetStreamId, fromEventNumber, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -178,7 +179,7 @@ public void with_multiple_events_and_before_position() { GetStreamId, -1, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -204,7 +205,7 @@ public void can_read_events() { GetStreamId, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -217,7 +218,7 @@ 
public void can_read_tombstone_event() { GetStreamId, EventNumber.DeletedStream, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -228,7 +229,7 @@ public void can_read_tombstone_event() { GetStreamId, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -239,7 +240,7 @@ public void can_read_tombstone_event() { GetStreamId, EventNumber.DeletedStream - 1, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(1, result.NextEventNumber); @@ -272,7 +273,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event GetStreamId, 7, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -282,7 +283,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event GetStreamId, 7, 4, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).ToArray(), result); Assert.AreEqual(3, result.NextEventNumber); @@ -293,7 +294,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event GetStreamId, 3, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(0, result.NextEventNumber); @@ -328,7 +329,7 @@ public void result_is_deduplicated_keeping_oldest_duplicates() { GetStreamId, 3, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult( _events diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs 
b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs index 29e2e48fe3d..b0dca6c8ab2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -62,24 +63,24 @@ public void returns_correct_events_before_position() { IndexReadEventInfoResult result; if (@event.EventStreamId == Stream) { result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, - @event.EventNumber - 1, int.MaxValue, @event.LogPosition); + @event.EventNumber - 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); // events >= @event.EventNumber should be filtered out result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, - @event.EventNumber, int.MaxValue, @event.LogPosition); + @event.EventNumber, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, - @event.EventNumber + 1, int.MaxValue, @event.LogPosition); + @event.EventNumber + 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); } result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, -1, int.MaxValue, - @event.LogPosition); + @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -104,7 +105,7 @@ public void 
returns_correct_events_with_max_count() { var result = ReadIndex.ReadEventInfoBackward_NoCollisions( - Hash, GetStreamId, fromEventNumber, maxCount, long.MaxValue); + Hash, GetStreamId, fromEventNumber, maxCount, long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(curEvents.Skip(curEvents.Count - maxCount).ToArray(), result); if (fromEventNumber - maxCount < 0) diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs index 1ec7577bcdd..171235f2ef2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -47,7 +48,7 @@ public void with_no_events() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -68,7 +69,7 @@ public void with_one_event() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -78,7 +79,7 @@ public void with_one_event() { Stream, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new EventRecord[] { }, result); Assert.True(result.IsEndOfStream); @@ -87,7 +88,7 @@ public void with_one_event() { CollidingStream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _collidingEvent }, result); @@ -97,7 +98,7 @@ 
public void with_one_event() { CollidingStream, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new EventRecord[] { }, result); Assert.True(result.IsEndOfStream); @@ -130,7 +131,7 @@ public void with_multiple_events() { Stream, fromEventNumber, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(fromEventNumber).ToArray(), result); if (fromEventNumber > 3) @@ -147,7 +148,7 @@ public void with_multiple_events_and_max_count() { Stream, fromEventNumber, 2, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(fromEventNumber).Take(2).ToArray(), result); if (fromEventNumber > 3) @@ -164,7 +165,7 @@ public void with_multiple_events_and_before_position() { Stream, fromEventNumber, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(fromEventNumber).Take(1).ToArray(), result); Assert.AreEqual((long) fromEventNumber + int.MaxValue, result.NextEventNumber); @@ -190,7 +191,7 @@ public void can_read_events_and_tombstone_event_not_returned() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.AreEqual(int.MaxValue, result.NextEventNumber); @@ -202,7 +203,7 @@ public void next_event_number_set_correctly() { Stream, 2, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.NextEventNumber); @@ -214,7 +215,7 @@ public void can_read_tombstone_event() { Stream, EventNumber.DeletedStream, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -248,7 +249,7 @@ public void 
strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.AreEqual(int.MaxValue, result.NextEventNumber); @@ -257,7 +258,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 0, 3, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(1).ToArray(), result); Assert.AreEqual(3, result.NextEventNumber); @@ -266,7 +267,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 3, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).ToArray(), result); Assert.AreEqual((long ) 3 + int.MaxValue, result.NextEventNumber); @@ -275,7 +276,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 4, 3, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).Take(2).ToArray(), result); Assert.AreEqual(7, result.NextEventNumber); @@ -284,7 +285,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 7, 3, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(11, result.NextEventNumber); @@ -293,7 +294,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 12, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(15, result.NextEventNumber); // from colliding stream, but doesn't matter much @@ -302,7 +303,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 12, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -336,7 
+337,7 @@ public void result_is_deduplicated_keeping_oldest_duplicates() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult( _events diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs index ba6c57ec2df..2dcec589ef4 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -54,7 +55,7 @@ public void returns_correct_events_before_position() { var curEvents = new List(); foreach (var @event in _events) { var result = - ReadIndex.ReadEventInfoForward_KnownCollisions(Stream, 0, int.MaxValue, @event.LogPosition); + ReadIndex.ReadEventInfoForward_KnownCollisions(Stream, 0, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); if (curEvents.Count == 0) Assert.True(result.IsEndOfStream); @@ -81,7 +82,7 @@ public void returns_correct_events_with_max_count() { Assert.GreaterOrEqual(fromEventNumber, 0); var result = ReadIndex.ReadEventInfoForward_KnownCollisions( - Stream, fromEventNumber, maxCount, long.MaxValue); + Stream, fromEventNumber, maxCount, long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(curEvents.Skip(curEvents.Count - maxCount).ToArray(), result); Assert.AreEqual(@event.EventNumber + 1, result.NextEventNumber); } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs 
b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs index 38ca0037d14..9ff0f9d9d3b 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -51,7 +52,7 @@ protected override void WriteTestScenario() { [Test] public void returns_correct_info_for_normal_event() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 1); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 1, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 1) .ToArray(); @@ -64,7 +65,7 @@ public void returns_correct_info_for_normal_event() { [Test] public void returns_correct_info_for_duplicate_events() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 2); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 2, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 2) .ToArray(); @@ -77,7 +78,7 @@ public void returns_correct_info_for_duplicate_events() { [Test] public void returns_correct_info_for_colliding_stream() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 3); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 3, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 3) .ToArray(); @@ -87,7 +88,7 @@ public void returns_correct_info_for_colliding_stream() { Assert.AreEqual(true, result.IsEndOfStream); CheckResult(events, result); - result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 3); + result = 
ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 3, ITransactionFileTracker.NoOp); events = _events .Where(x => x.EventStreamId == CollidingStream && x.EventNumber == 3) .ToArray(); @@ -100,7 +101,7 @@ public void returns_correct_info_for_colliding_stream() { [Test] public void returns_correct_info_for_soft_deleted_stream() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(SoftDeletedStream, 10); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(SoftDeletedStream, 10, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == SoftDeletedStream && x.EventNumber == 10) .ToArray(); @@ -113,7 +114,7 @@ public void returns_correct_info_for_soft_deleted_stream() { [Test] public void returns_correct_info_for_hard_deleted_stream() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(HardDeletedStream, 20); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(HardDeletedStream, 20, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == HardDeletedStream && x.EventNumber == 20) .ToArray(); @@ -126,7 +127,7 @@ public void returns_correct_info_for_hard_deleted_stream() { [Test] public void returns_empty_info_when_event_does_not_exist() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 6); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 6, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 6) .ToArray(); @@ -135,7 +136,7 @@ public void returns_empty_info_when_event_does_not_exist() { Assert.AreEqual(-1, result.NextEventNumber); Assert.AreEqual(true, result.IsEndOfStream); - result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 4); + result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 4, ITransactionFileTracker.NoOp); events = _events .Where(x => x.EventStreamId == CollidingStream && x.EventNumber == 4) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs 
b/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs index 3f95b03c573..ed0f628b3cb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs +++ b/src/EventStore.Core.Tests/TransactionLog/FakeReadIndex.cs @@ -48,24 +48,24 @@ public ReadIndexStats GetStatistics() { throw new NotImplementedException(); } - public IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber) { + public IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { + public IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { + public IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -74,12 +74,12 @@ public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long 
fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, - int maxCount, long beforePosition) { + int maxCount, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -105,29 +105,29 @@ public IndexReadAllResult ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, throw new NotImplementedException(); } - public bool IsStreamDeleted(TStreamId streamId) { + public bool IsStreamDeleted(TStreamId streamId, ITransactionFileTracker tracker) { return _isStreamDeleted(streamId); } - public long GetStreamLastEventNumber(TStreamId streamId) { + public long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { if (_metastreams.IsMetaStream(streamId)) - return GetStreamLastEventNumber(_metastreams.OriginalStreamOf(streamId)); + return GetStreamLastEventNumber(_metastreams.OriginalStreamOf(streamId), ITransactionFileTracker.NoOp); return _isStreamDeleted(streamId) ? 
EventNumber.DeletedStream : 1000000; } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -135,7 +135,7 @@ public StreamAccess CheckStreamAccess(TStreamId streamId, StreamAccessType strea throw new NotImplementedException(); } - public StreamMetadata GetStreamMetadata(TStreamId streamId) { + public StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index 8eea61fef78..30278de71ed 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -641,7 +641,7 @@ private static void CheckIndex( streamId: streamId, fromEventNumber: eventNumber, maxCount: 1, - beforePosition: long.MaxValue) + beforePosition: long.MaxValue, tracker: ITransactionFileTracker.NoOp) : actual.ReadEventInfoForward_NoCollisions( stream: 
hasher.Hash(streamId), fromEventNumber: eventNumber, @@ -650,7 +650,7 @@ private static void CheckIndex( if (result.EventInfos.Length != 1) { // remember this applies metadata, so is of limited use - var wholeStream = actual.ReadStreamEventsForward($"{streamId}", streamId, fromEventNumber: 0, maxCount: 100); + var wholeStream = actual.ReadStreamEventsForward($"{streamId}", streamId, fromEventNumber: 0, maxCount: 100, tracker: ITransactionFileTracker.NoOp); Assert.True(result.EventInfos.Length == 1, $"Couldn't find {streamId}:{eventNumber} in index."); } @@ -675,7 +675,7 @@ private static void CheckIndex( streamId: streamId, fromEventNumber: 0, maxCount: 1000, - beforePosition: long.MaxValue) + beforePosition: long.MaxValue, tracker: ITransactionFileTracker.NoOp) : actual.ReadEventInfoForward_NoCollisions( stream: hasher.Hash(streamId), fromEventNumber: 0, diff --git a/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs b/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs index 8364ae716ba..3243cdc0741 100644 --- a/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs +++ b/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs @@ -16,7 +16,7 @@ public EventTypeIdToNameFromStandardIndex(IIndexReader indexReader) { public bool TryGetName(uint eventTypeId, out string name) { var record = _indexReader.ReadPrepare( streamId: LogV3SystemStreams.EventTypesStreamNumber, - eventNumber: EventTypeIdConverter.ToEventNumber(eventTypeId), tracker: ITransactionFileTracker.NoOp); + eventNumber: EventTypeIdConverter.ToEventNumber(eventTypeId), tracker: ITransactionFileTracker.NoOp); // noop ok: LogV3 if (record is null) { name = null; @@ -31,7 +31,7 @@ public bool TryGetName(uint eventTypeId, out string name) { } public bool TryGetLastValue(out uint lastValue) { - var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.EventTypesStreamNumber, ITransactionFileTracker.NoOp); + var lastEventNumber = 
_indexReader.GetStreamLastEventNumber(LogV3SystemStreams.EventTypesStreamNumber, ITransactionFileTracker.NoOp); // noop ok: LogV3 var success = ExpectedVersion.NoStream < lastEventNumber && lastEventNumber != EventNumber.DeletedStream; lastValue = EventTypeIdConverter.ToEventTypeId(lastEventNumber); return success; diff --git a/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs b/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs index 52bd4c90530..9205e6b0824 100644 --- a/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs +++ b/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs @@ -22,7 +22,7 @@ public bool TryGetName(StreamId streamId, out string name) { // explicitly create metastreams. var record = _indexReader.ReadPrepare( streamId: LogV3SystemStreams.StreamsCreatedStreamNumber, - eventNumber: StreamIdConverter.ToEventNumber(streamId), tracker: ITransactionFileTracker.NoOp); + eventNumber: StreamIdConverter.ToEventNumber(streamId), tracker: ITransactionFileTracker.NoOp); // noop ok: LogV3 if (record is null) { name = null; @@ -37,7 +37,7 @@ public bool TryGetName(StreamId streamId, out string name) { } public bool TryGetLastValue(out StreamId lastValue) { - var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.StreamsCreatedStreamNumber, ITransactionFileTracker.NoOp); + var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.StreamsCreatedStreamNumber, ITransactionFileTracker.NoOp); // noop ok: LogV3 var success = ExpectedVersion.NoStream < lastEventNumber && lastEventNumber != EventNumber.DeletedStream; lastValue = StreamIdConverter.ToStreamId(lastEventNumber); return success; diff --git a/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs b/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs index 18cc383892a..409541b733e 100644 --- 
a/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs +++ b/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs @@ -13,6 +13,7 @@ using EventStore.Core.Services.TimerService; using EventStore.Core.Services.UserManagement; using EventStore.Core.Telemetry; +using EventStore.Core.TransactionLog; using ILogger = Serilog.ILogger; using ReadStreamResult = EventStore.Core.Data.ReadStreamResult; @@ -936,7 +937,7 @@ public void ConnectToPersistentSubscription( long? lastEventNumber = null; if (eventSource.FromStream) { var streamId = _readIndex.GetStreamId(eventSource.EventStreamId); - lastEventNumber = _readIndex.GetStreamLastEventNumber(streamId); + lastEventNumber = _readIndex.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); } var lastCommitPos = _readIndex.LastIndexedPosition; var subscribedMessage = @@ -1011,7 +1012,7 @@ private ResolvedEvent ResolveLinkToEvent(EventRecord eventRecord, long commitPos long eventNumber = long.Parse(parts[0]); string streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); diff --git a/src/EventStore.Core/Services/RedactionService.cs b/src/EventStore.Core/Services/RedactionService.cs index ec9f4d50e68..48be23ff766 100644 --- a/src/EventStore.Core/Services/RedactionService.cs +++ b/src/EventStore.Core/Services/RedactionService.cs @@ -66,7 +66,7 @@ public void Handle(RedactionMessage.GetEventPosition message) { private void GetEventPosition(string streamName, long eventNumber, IEnvelope envelope) { var streamId = _readIndex.GetStreamId(streamName); - var result = _readIndex.ReadEventInfo_KeepDuplicates(streamId, eventNumber); + var result = 
_readIndex.ReadEventInfo_KeepDuplicates(streamId, eventNumber, _tracker); var eventPositions = new EventPosition[result.EventInfos.Length]; diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index 851d86192b5..78c429315af 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -51,32 +51,32 @@ public interface IReadIndex : IReadIndex { // - duplicates are removed, keeping only the earliest event in the log // - streamId drives the read, streamName is only for populating on the result. // this was less messy than safely adding the streamName to the EventRecord at some point after construction. - IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber); - IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); - IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); + IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); // ReadEventInfo_KeepDuplicates() : // - deleted events are not filtered out // - duplicates are kept, in ascending order of log position // - next event number is always -1 - IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber); + IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); // ReadEventInfo*Collisions() : // - 
deleted events are not filtered out // - duplicates are removed, keeping only the earliest event in the log // - only events that are before "beforePosition" in the transaction log are returned - IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); - bool IsStreamDeleted(TStreamId streamId); - long GetStreamLastEventNumber(TStreamId streamId); - long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition); - long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition); - StreamMetadata GetStreamMetadata(TStreamId streamId); - StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId); - TStreamId GetEventStreamIdByTransactionId(long transactionId); + bool IsStreamDeleted(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_KnownCollisions(TStreamId 
streamId, long beforePosition, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker); + StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker); + StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker); + TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker); TStreamId GetStreamId(string streamName); string GetStreamName(TStreamId streamId); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index c23c5d06b51..51b23280987 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -99,28 +99,28 @@ public ReadIndex(IPublisher bus, RegisterHitsMisses(cacheTracker); } - IndexReadEventResult IReadIndex.ReadEvent(string streamName, TStreamId streamId, long eventNumber) { - return _indexReader.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); + IndexReadEventResult IReadIndex.ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { + return _indexReader.ReadEvent(streamName, streamId, eventNumber, tracker); } - IndexReadStreamResult IReadIndex.ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return _indexReader.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); + IndexReadStreamResult IReadIndex.ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return _indexReader.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount, tracker); } - IndexReadStreamResult IReadIndex.ReadStreamEventsBackward(string 
streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return _indexReader.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp); + IndexReadStreamResult IReadIndex.ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return _indexReader.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount, tracker); } TStreamId IReadIndex.GetStreamId(string streamName) { return _streamIds.LookupValue(streamName); } - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { - return _indexReader.ReadEventInfo_KeepDuplicates(streamId, eventNumber, ITransactionFileTracker.NoOp); + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfo_KeepDuplicates(streamId, eventNumber, tracker); } - public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoForward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, ITransactionFileTracker.NoOp); + public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfoForward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, tracker); } public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition) { @@ -128,41 +128,41 @@ public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { - 
return _indexReader.ReadEventInfoBackward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, ITransactionFileTracker.NoOp); + long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfoBackward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, tracker); } public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, - long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoBackward_NoCollisions(stream, getStreamId, fromEventNumber, maxCount, beforePosition, ITransactionFileTracker.NoOp); + long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfoBackward_NoCollisions(stream, getStreamId, fromEventNumber, maxCount, beforePosition, tracker); } string IReadIndex.GetStreamName(TStreamId streamId) { return _streamNames.LookupName(streamId); } - bool IReadIndex.IsStreamDeleted(TStreamId streamId) { - return _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) == EventNumber.DeletedStream; + bool IReadIndex.IsStreamDeleted(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber(streamId, tracker) == EventNumber.DeletedStream; } - long IReadIndex.GetStreamLastEventNumber(TStreamId streamId) { - return _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); + long IReadIndex.GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber(streamId, tracker); } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { - return _indexReader.GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, ITransactionFileTracker.NoOp); + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { + return 
_indexReader.GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, tracker); } - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { - return _indexReader.GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, ITransactionFileTracker.NoOp); + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, tracker); } - StreamMetadata IReadIndex.GetStreamMetadata(TStreamId streamId) { - return _indexReader.GetStreamMetadata(streamId, ITransactionFileTracker.NoOp); + StreamMetadata IReadIndex.GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetStreamMetadata(streamId, tracker); } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { - return _indexReader.GetEventStreamIdByTransactionId(transactionId, ITransactionFileTracker.NoOp); + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { + return _indexReader.GetEventStreamIdByTransactionId(transactionId, tracker); } IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount, @@ -187,8 +187,8 @@ IndexReadAllResult IReadIndex.ReadAllEventsBackward(TFPos pos, int maxCount, return _allReader.ReadAllEventsBackward(pos, maxCount, tracker); } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { - return _indexReader.GetEffectiveAcl(streamId, ITransactionFileTracker.NoOp); + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetEffectiveAcl(streamId, tracker); } void RegisterHitsMisses(ICacheHitsMissesTracker tracker) { diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs 
b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index 202c6b681c0..3a6d6c115a2 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -319,7 +319,7 @@ public StorageReaderWorker( msg.Envelope.ReplyWith(new StorageMessage.OperationCancelledMessage(msg.CancellationToken)); return; } - var acl = _readIndex.GetEffectiveAcl(_readIndex.GetStreamId(msg.StreamId)); + var acl = _readIndex.GetEffectiveAcl(_readIndex.GetStreamId(msg.StreamId), ITransactionFileTracker.NoOp); msg.Envelope.ReplyWith(new StorageMessage.EffectiveStreamAclResponse(acl)); } @@ -328,7 +328,7 @@ private ClientMessage.ReadEventCompleted ReadEvent(ClientMessage.ReadEvent msg) try { var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(streamName); - var result = _readIndex.ReadEvent(streamName, streamId, msg.EventNumber); + var result = _readIndex.ReadEvent(streamName, streamId, msg.EventNumber, ITransactionFileTracker.NoOp); var record = result.Result == ReadEventResult.Success && msg.ResolveLinkTos ? 
ResolveLinkToEvent(result.Record, msg.User, null) : ResolvedEvent.ForUnresolvedEvent(result.Record); @@ -363,12 +363,12 @@ private ClientMessage.ReadStreamEventsForwardCompleted ReadStreamEventsForward( var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(msg.EventStreamId); if (msg.ValidationStreamVersion.HasValue && - _readIndex.GetStreamLastEventNumber(streamId) == msg.ValidationStreamVersion) + _readIndex.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) == msg.ValidationStreamVersion) return NoData(msg, ReadStreamResult.NotModified, lastIndexPosition, msg.ValidationStreamVersion.Value); var result = - _readIndex.ReadStreamEventsForward(streamName, streamId, msg.FromEventNumber, msg.MaxCount); + _readIndex.ReadStreamEventsForward(streamName, streamId, msg.FromEventNumber, msg.MaxCount, ITransactionFileTracker.NoOp); CheckEventsOrder(msg, result); var resolvedPairs = ResolveLinkToEvents(result.Records, msg.ResolveLinkTos, msg.User); if (resolvedPairs == null) @@ -397,13 +397,13 @@ private ClientMessage.ReadStreamEventsBackwardCompleted ReadStreamEventsBackward var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(msg.EventStreamId); if (msg.ValidationStreamVersion.HasValue && - _readIndex.GetStreamLastEventNumber(streamId) == msg.ValidationStreamVersion) + _readIndex.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) == msg.ValidationStreamVersion) return NoData(msg, ReadStreamResult.NotModified, lastIndexedPosition, msg.ValidationStreamVersion.Value); var result = _readIndex.ReadStreamEventsBackward(streamName, streamId, msg.FromEventNumber, - msg.MaxCount); + msg.MaxCount, ITransactionFileTracker.NoOp); CheckEventsOrder(msg, result); var resolvedPairs = ResolveLinkToEvents(result.Records, msg.ResolveLinkTos, msg.User); if (resolvedPairs == null) @@ -445,7 +445,7 @@ private ClientMessage.ReadAllEventsForwardCompleted if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, 
lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); return new ClientMessage.ReadAllEventsForwardCompleted( msg.CorrelationId, ReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, res.CurrentPos, res.NextPos, res.PrevPos, lastIndexedPosition); @@ -482,7 +482,7 @@ private ClientMessage.ReadAllEventsBackwardCompleted ReadAllEventsBackward( if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); return new ClientMessage.ReadAllEventsBackwardCompleted( msg.CorrelationId, ReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, res.CurrentPos, res.NextPos, res.PrevPos, lastIndexedPosition); @@ -526,7 +526,7 @@ private ClientMessage.FilteredReadAllEventsForwardCompleted FilteredReadAllEvent return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); return new ClientMessage.FilteredReadAllEventsForwardCompleted( msg.CorrelationId, FilteredReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, @@ -569,7 +569,7 @@ private ClientMessage.FilteredReadAllEventsBackwardCompleted FilteredReadAllEven return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); return new ClientMessage.FilteredReadAllEventsBackwardCompleted( msg.CorrelationId, 
FilteredReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, @@ -679,7 +679,7 @@ private ResolvedEvent[] ResolveLinkToEvents(EventRecord[] records, bool resolveL if (long.TryParse(parts[0], out long eventNumber)) { var streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); @@ -780,7 +780,7 @@ public void Handle(StorageMessage.StreamIdFromTransactionIdRequest message) { if (message.CancellationToken.IsCancellationRequested) { message.Envelope.ReplyWith(new StorageMessage.OperationCancelledMessage(message.CancellationToken)); } - var streamId = _readIndex.GetEventStreamIdByTransactionId(message.TransactionId); + var streamId = _readIndex.GetEventStreamIdByTransactionId(message.TransactionId, ITransactionFileTracker.NoOp); var streamName = _readIndex.GetStreamName(streamId); message.Envelope.ReplyWith(new StorageMessage.StreamIdFromTransactionIdResponse(streamName)); } diff --git a/src/EventStore.Core/Services/SubscriptionsService.cs b/src/EventStore.Core/Services/SubscriptionsService.cs index 96275247547..447a0d1aeb2 100644 --- a/src/EventStore.Core/Services/SubscriptionsService.cs +++ b/src/EventStore.Core/Services/SubscriptionsService.cs @@ -10,6 +10,7 @@ using System.Linq; using EventStore.Core.Util; using ILogger = Serilog.ILogger; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Services { public enum SubscriptionDropReason { @@ -120,7 +121,7 @@ public void Handle(ClientMessage.SubscribeToStream msg) { if (isInMemoryStream) { lastEventNumber = -1; } else if (!msg.EventStreamId.IsEmptyString()) { - lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId)); + lastEventNumber = 
_readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), ITransactionFileTracker.NoOp); } var lastIndexedPos = isInMemoryStream ? -1 : _readIndex.LastIndexedPosition; @@ -140,7 +141,7 @@ public void Handle(ClientMessage.FilteredSubscribeToStream msg) { if (isInMemoryStream) { lastEventNumber = -1; } else if (!msg.EventStreamId.IsEmptyString()) { - lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId)); + lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), ITransactionFileTracker.NoOp); } var lastIndexedPos = isInMemoryStream ? -1 : _readIndex.LastIndexedPosition; @@ -342,7 +343,7 @@ private ResolvedEvent ResolveLinkToEvent(EventRecord eventRecord, long commitPos long eventNumber = long.Parse(parts[0]); string streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index 809f3390c6a..f4603c7d5f4 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -613,7 +613,7 @@ private bool ShouldKeepPrepare( return true; } - var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId); + var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId, ITransactionFileTracker.NoOp); if (lastEventNumber == EventNumber.DeletedStream) { // The stream is hard deleted but this is not the tombstone. 
// When all prepares and commit of transaction belong to single chunk and the stream is deleted, @@ -671,7 +671,7 @@ private bool ShouldKeepPrepare( return true; } - var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId); + var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId, ITransactionFileTracker.NoOp); bool canRemove = (meta.MaxCount.HasValue && eventNumber < lastEventNumber - meta.MaxCount.Value + 1) || (meta.TruncateBefore.HasValue && eventNumber < meta.TruncateBefore.Value) || (meta.MaxAge.HasValue && prepare.TimeStamp < DateTime.UtcNow - meta.MaxAge.Value); @@ -686,7 +686,7 @@ private bool ShouldKeepPrepare( } private bool DiscardBecauseDuplicate(IPrepareLogRecord prepare, long eventNumber) { - var result = _readIndex.ReadEvent(IndexReader.UnspecifiedStreamName, prepare.EventStreamId, eventNumber); + var result = _readIndex.ReadEvent(IndexReader.UnspecifiedStreamName, prepare.EventStreamId, eventNumber, ITransactionFileTracker.NoOp); if (result.Result == ReadEventResult.Success && result.Record.LogPosition != prepare.LogPosition) { // prepare isn't the record we get for an index read at its own stream/version. // therefore it is a duplicate that cannot be read from the index, discard it. 
@@ -701,13 +701,13 @@ private bool IsSoftDeletedTempStreamWithinSameChunk(TStreamId eventStreamId, lon TStreamId msh; if (_metastreams.IsMetaStream(eventStreamId)) { var originalStreamId = _metastreams.OriginalStreamOf(eventStreamId); - var meta = _readIndex.GetStreamMetadata(originalStreamId); + var meta = _readIndex.GetStreamMetadata(originalStreamId, ITransactionFileTracker.NoOp); if (meta.TruncateBefore != EventNumber.DeletedStream || meta.TempStream != true) return false; sh = originalStreamId; msh = eventStreamId; } else { - var meta = _readIndex.GetStreamMetadata(eventStreamId); + var meta = _readIndex.GetStreamMetadata(eventStreamId, ITransactionFileTracker.NoOp); if (meta.TruncateBefore != EventNumber.DeletedStream || meta.TempStream != true) return false; sh = eventStreamId; diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs index 994fad8f075..5bd0f47c321 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs @@ -30,7 +30,7 @@ public IndexReadEventInfoResult ReadEventInfoForward( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, ITransactionFileTracker.NoOp); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } @@ -52,14 +52,14 @@ public IndexReadEventInfoResult ReadEventInfoBackward( _ => streamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, ITransactionFileTracker.NoOp); case StreamHandle.Kind.Id: // uses log to check for hash collisions return _readIndex.ReadEventInfoBackward_KnownCollisions( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, ITransactionFileTracker.NoOp); default: throw new ArgumentOutOfRangeException(nameof(handle), 
handle, null); } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs index 06a9a237284..6edcf0765fa 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs @@ -28,12 +28,12 @@ public long GetLastEventNumber( return _readIndex.GetStreamLastEventNumber_NoCollisions( handle.StreamHash, _lookupUniqueHashUser, - scavengePoint.Position); + scavengePoint.Position, ITransactionFileTracker.NoOp); case StreamHandle.Kind.Id: // uses the index and the log to fetch the last event number return _readIndex.GetStreamLastEventNumber_KnownCollisions( handle.StreamId, - scavengePoint.Position); + scavengePoint.Position, ITransactionFileTracker.NoOp); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } @@ -59,7 +59,7 @@ public IndexReadEventInfoResult ReadEventInfoForward( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, ITransactionFileTracker.NoOp); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } From 04918f2e2d4f09030f52bc25e125e8b8010a3371 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Fri, 22 Nov 2024 16:01:01 +0000 Subject: [PATCH 20/38] more plumbing (Persistent subscription service done) --- .../PersistentSubscriptionTests.cs | 4 +++- .../PersistentSubscription/when_an_error_occurs.cs | 4 +++- src/EventStore.Core/ClusterVNode.cs | 2 +- .../PersistentSubscriptionService.cs | 11 +++++++---- .../Services/UserManagement/SystemAccounts.cs | 1 + 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs b/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs index 936780a8f26..24dca736e20 
100644 --- a/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs +++ b/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs @@ -26,6 +26,7 @@ using EventStore.Core.Tests.TransactionLog; using EventFilter = EventStore.Core.Services.Storage.ReaderIndex.EventFilter; using StreamMetadata = EventStore.Core.Data.StreamMetadata; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.PersistentSubscription { public enum EventSource { @@ -189,7 +190,8 @@ public when_updating_all_stream_subscription_with_filter() { new FakeReadIndex(_ => false, new MetaStreamLookup()), ioDispatcher, bus, new PersistentSubscriptionConsumerStrategyRegistry(bus, bus, - Array.Empty())); + Array.Empty()), + ITransactionFileTrackerFactory.NoOp); _sut.Start(); _sut.Handle(new SystemMessage.BecomeLeader(correlationId: Guid.NewGuid())); diff --git a/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs b/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs index 09d30b6e77d..ae6130816d9 100644 --- a/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs +++ b/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs @@ -10,6 +10,7 @@ using EventStore.Core.Services.PersistentSubscription; using EventStore.Core.Services.PersistentSubscription.ConsumerStrategy; using EventStore.Core.Tests.TransactionLog; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.PersistentSubscription { @@ -34,7 +35,8 @@ protected when_an_error_occurs(TResult expectedResult) { new FakeReadIndex(_ => false, new MetaStreamLookup()), new IODispatcher(bus, new PublishEnvelope(bus)), bus, new PersistentSubscriptionConsumerStrategyRegistry(bus, bus, - Array.Empty())); + Array.Empty()), + ITransactionFileTrackerFactory.NoOp); _envelope = new 
CallbackEnvelope(_replySource.SetResult); _sut.Start(); } diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index c9797ac0ae3..6fab99178b9 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -1191,7 +1191,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var consumerStrategyRegistry = new PersistentSubscriptionConsumerStrategyRegistry(_mainQueue, _mainBus, additionalPersistentSubscriptionConsumerStrategyFactories); var persistentSubscription = new PersistentSubscriptionService(perSubscrQueue, readIndex, psubDispatcher, - _mainQueue, consumerStrategyRegistry); + _mainQueue, consumerStrategyRegistry, trackers.TransactionFileTrackers); perSubscrBus.Subscribe(persistentSubscription); perSubscrBus.Subscribe(persistentSubscription); perSubscrBus.Subscribe(persistentSubscription); diff --git a/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs b/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs index 409541b733e..ca210adafa3 100644 --- a/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs +++ b/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs @@ -58,6 +58,7 @@ public class PersistentSubscriptionService : private readonly IODispatcher _ioDispatcher; private readonly IPublisher _bus; private readonly PersistentSubscriptionConsumerStrategyRegistry _consumerStrategyRegistry; + private readonly ITransactionFileTracker _tfTracker; private readonly IPersistentSubscriptionCheckpointReader _checkpointReader; private readonly IPersistentSubscriptionStreamReader _streamReader; private PersistentSubscriptionConfig _config = new PersistentSubscriptionConfig(); @@ -68,7 +69,8 @@ public class PersistentSubscriptionService : public PersistentSubscriptionService(IQueuedHandler queuedHandler, IReadIndex readIndex, IODispatcher ioDispatcher, IPublisher bus, - 
PersistentSubscriptionConsumerStrategyRegistry consumerStrategyRegistry) { + PersistentSubscriptionConsumerStrategyRegistry consumerStrategyRegistry, + ITransactionFileTrackerFactory tfTrackers) { Ensure.NotNull(queuedHandler, "queuedHandler"); Ensure.NotNull(readIndex, "readIndex"); Ensure.NotNull(ioDispatcher, "ioDispatcher"); @@ -78,6 +80,7 @@ public PersistentSubscriptionService(IQueuedHandler queuedHandler, IReadIndex Date: Sat, 23 Nov 2024 09:06:04 +0000 Subject: [PATCH 21/38] more plumbing (StorageReaderWorker done) --- ...actionFileTrackerFactoryExtensionsTests.cs | 65 +++++++++++++++++++ ...TransactionFileTrackerFactoryExtensions.cs | 24 +++++++ src/EventStore.Core/Messaging/IEnvelope.cs | 5 ++ .../Services/Storage/StorageReaderWorker.cs | 39 ++++++----- 4 files changed, 115 insertions(+), 18 deletions(-) create mode 100644 src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs create mode 100644 src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs diff --git a/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs b/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs new file mode 100644 index 00000000000..7737bc014c5 --- /dev/null +++ b/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs @@ -0,0 +1,65 @@ +using System; +using System.Security.Claims; +using EventStore.Core.Messages; +using EventStore.Core.Messaging; +using EventStore.Core.Services.UserManagement; +using EventStore.Core.TransactionLog; +using EventStore.Core.TransactionLog.LogRecords; +using Xunit; + +namespace EventStore.Core.XUnit.Tests.Messages; + +public class ITransactionFileTrackerFactoryExtensionsTests { + readonly ITransactionFileTrackerFactory _factory = new FakeFactory(); + + [Fact] + public void can_get_for_username() { + var tracker = _factory.For(SystemAccounts.SystemName) as FakeTracker; + Assert.Equal("system", 
tracker.Username); + } + + [Fact] + public void can_get_for_claims_principal() { + var tracker = _factory.For(SystemAccounts.System) as FakeTracker; + Assert.Equal("system", tracker.Username); + } + + [Fact] + public void can_get_for_request_message() { + var tracker = _factory.For(new FakeReadRequest(SystemAccounts.System)) as FakeTracker; + Assert.Equal("system", tracker.Username); + } + + [Fact] + public void can_get_for_null_username() { + var tracker = _factory.For((string)null) as FakeTracker; + Assert.Equal("anonymous", tracker.Username); + } + + [Fact] + public void can_get_for_null_claims_principal() { + var tracker = _factory.For((ClaimsPrincipal)null) as FakeTracker; + Assert.Equal("anonymous", tracker.Username); + } + + [Fact] + public void can_get_for_null_request_message() { + var tracker = _factory.For((FakeReadRequest)null) as FakeTracker; + Assert.Equal("anonymous", tracker.Username); + } + + class FakeFactory : ITransactionFileTrackerFactory { + public ITransactionFileTracker GetOrAdd(string name) => new FakeTracker(name); + } + + record FakeTracker(string Username) : ITransactionFileTracker { + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { + } + } + + class FakeReadRequest : ClientMessage.ReadRequestMessage { + public FakeReadRequest(ClaimsPrincipal user) : + base(Guid.NewGuid(), Guid.NewGuid(), IEnvelope.NoOp, user, expires: null) { + } + } +} diff --git a/src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs b/src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs new file mode 100644 index 00000000000..73e23ad59ad --- /dev/null +++ b/src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs @@ -0,0 +1,24 @@ +using System.Security.Claims; +using EventStore.Core.TransactionLog; + +namespace EventStore.Core.Messages; + +public static class ITransactionFileTrackerFactoryExtensions { + public static ITransactionFileTracker For( + this ITransactionFileTrackerFactory 
factory, + string username) => + + factory.GetOrAdd(username ?? "anonymous"); + + public static ITransactionFileTracker For( + this ITransactionFileTrackerFactory factory, + ClaimsPrincipal user) => + + factory.For(user?.Identity?.Name); + + public static ITransactionFileTracker For( + this ITransactionFileTrackerFactory factory, + ClientMessage.ReadRequestMessage msg) => + + factory.For(msg?.User); +} diff --git a/src/EventStore.Core/Messaging/IEnvelope.cs b/src/EventStore.Core/Messaging/IEnvelope.cs index 7ab44044a81..96fc3a63a37 100644 --- a/src/EventStore.Core/Messaging/IEnvelope.cs +++ b/src/EventStore.Core/Messaging/IEnvelope.cs @@ -4,5 +4,10 @@ public interface IEnvelope { } public interface IEnvelope : IEnvelope { + static readonly IEnvelope NoOp = new NoOp(); + } + + file class NoOp : IEnvelope { + public void ReplyWith(U message) where U : Message { } } } diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index 3a6d6c115a2..ae36c22dc54 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -15,6 +15,7 @@ using ILogger = Serilog.ILogger; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services.Storage { public abstract class StorageReaderWorker { @@ -319,7 +320,7 @@ public StorageReaderWorker( msg.Envelope.ReplyWith(new StorageMessage.OperationCancelledMessage(msg.CancellationToken)); return; } - var acl = _readIndex.GetEffectiveAcl(_readIndex.GetStreamId(msg.StreamId), ITransactionFileTracker.NoOp); + var acl = _readIndex.GetEffectiveAcl(_readIndex.GetStreamId(msg.StreamId), _trackers.For(SystemAccounts.SystemName)); msg.Envelope.ReplyWith(new StorageMessage.EffectiveStreamAclResponse(acl)); } @@ -328,7 +329,7 @@ private ClientMessage.ReadEventCompleted 
ReadEvent(ClientMessage.ReadEvent msg) try { var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(streamName); - var result = _readIndex.ReadEvent(streamName, streamId, msg.EventNumber, ITransactionFileTracker.NoOp); + var result = _readIndex.ReadEvent(streamName, streamId, msg.EventNumber, _trackers.For(msg)); var record = result.Result == ReadEventResult.Success && msg.ResolveLinkTos ? ResolveLinkToEvent(result.Record, msg.User, null) : ResolvedEvent.ForUnresolvedEvent(result.Record); @@ -360,15 +361,16 @@ private ClientMessage.ReadStreamEventsForwardCompleted ReadStreamEventsForward( throw new ArgumentException($"Read size too big, should be less than {MaxPageSize} items"); } + var tracker = _trackers.For(msg); var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(msg.EventStreamId); if (msg.ValidationStreamVersion.HasValue && - _readIndex.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) == msg.ValidationStreamVersion) + _readIndex.GetStreamLastEventNumber(streamId, tracker) == msg.ValidationStreamVersion) return NoData(msg, ReadStreamResult.NotModified, lastIndexPosition, msg.ValidationStreamVersion.Value); var result = - _readIndex.ReadStreamEventsForward(streamName, streamId, msg.FromEventNumber, msg.MaxCount, ITransactionFileTracker.NoOp); + _readIndex.ReadStreamEventsForward(streamName, streamId, msg.FromEventNumber, msg.MaxCount, tracker); CheckEventsOrder(msg, result); var resolvedPairs = ResolveLinkToEvents(result.Records, msg.ResolveLinkTos, msg.User); if (resolvedPairs == null) @@ -394,16 +396,17 @@ private ClientMessage.ReadStreamEventsBackwardCompleted ReadStreamEventsBackward throw new ArgumentException($"Read size too big, should be less than {MaxPageSize} items"); } + var tracker = _trackers.For(msg); var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(msg.EventStreamId); if (msg.ValidationStreamVersion.HasValue && - _readIndex.GetStreamLastEventNumber(streamId, 
ITransactionFileTracker.NoOp) == msg.ValidationStreamVersion) + _readIndex.GetStreamLastEventNumber(streamId, tracker) == msg.ValidationStreamVersion) return NoData(msg, ReadStreamResult.NotModified, lastIndexedPosition, msg.ValidationStreamVersion.Value); var result = _readIndex.ReadStreamEventsBackward(streamName, streamId, msg.FromEventNumber, - msg.MaxCount, ITransactionFileTracker.NoOp); + msg.MaxCount, tracker); CheckEventsOrder(msg, result); var resolvedPairs = ResolveLinkToEvents(result.Records, msg.ResolveLinkTos, msg.User); if (resolvedPairs == null) @@ -440,12 +443,13 @@ private ClientMessage.ReadAllEventsForwardCompleted if (msg.ValidationTfLastCommitPosition == lastIndexedPosition) return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); - var res = _readIndex.ReadAllEventsForward(pos, msg.MaxCount, ITransactionFileTracker.NoOp); //qq + var tracker = _trackers.For(msg); + var res = _readIndex.ReadAllEventsForward(pos, msg.MaxCount, tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.ReadAllEventsForwardCompleted( msg.CorrelationId, ReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, res.CurrentPos, res.NextPos, res.PrevPos, lastIndexedPosition); @@ -477,12 +481,12 @@ private ClientMessage.ReadAllEventsBackwardCompleted ReadAllEventsBackward( return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); var tracker = _trackers.GetOrAdd(msg.User.Identity.Name); - var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount, ITransactionFileTracker.NoOp); + var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount, tracker); var resolved = 
ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.ReadAllEventsBackwardCompleted( msg.CorrelationId, ReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, res.CurrentPos, res.NextPos, res.PrevPos, lastIndexedPosition); @@ -515,9 +519,7 @@ private ClientMessage.FilteredReadAllEventsForwardCompleted FilteredReadAllEvent return NoDataForFilteredCommand(msg, FilteredReadAllResult.NotModified, pos, lastIndexedPosition); - //qq is all this info necessarily here? do all messages have a non null user, do all users (cps) have names - var tracker = _trackers.GetOrAdd(msg.User.Identity.Name); - + var tracker = _trackers.For(msg); ; var res = _readIndex.ReadAllEventsForwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, msg.EventFilter, tracker); @@ -526,7 +528,7 @@ private ClientMessage.FilteredReadAllEventsForwardCompleted FilteredReadAllEvent return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.FilteredReadAllEventsForwardCompleted( msg.CorrelationId, FilteredReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, @@ -562,14 +564,15 @@ private ClientMessage.FilteredReadAllEventsBackwardCompleted FilteredReadAllEven return NoDataForFilteredCommand(msg, FilteredReadAllResult.NotModified, pos, lastIndexedPosition); + var tracker = _trackers.For(msg); var res = _readIndex.ReadAllEventsBackwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, - msg.EventFilter, 
ITransactionFileTracker.NoOp); //qq + msg.EventFilter, tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, ITransactionFileTracker.NoOp); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.FilteredReadAllEventsBackwardCompleted( msg.CorrelationId, FilteredReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, @@ -679,7 +682,7 @@ private ResolvedEvent[] ResolveLinkToEvents(EventRecord[] records, bool resolveL if (long.TryParse(parts[0], out long eventNumber)) { var streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, _trackers.For(user)); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); @@ -780,7 +783,7 @@ public void Handle(StorageMessage.StreamIdFromTransactionIdRequest message) { if (message.CancellationToken.IsCancellationRequested) { message.Envelope.ReplyWith(new StorageMessage.OperationCancelledMessage(message.CancellationToken)); } - var streamId = _readIndex.GetEventStreamIdByTransactionId(message.TransactionId, ITransactionFileTracker.NoOp); + var streamId = _readIndex.GetEventStreamIdByTransactionId(message.TransactionId, _trackers.For(SystemAccounts.SystemName)); var streamName = _readIndex.GetStreamName(streamId); message.Envelope.ReplyWith(new StorageMessage.StreamIdFromTransactionIdResponse(streamName)); } From 8636ac7432cb51e5693d5d6393dc6518a04ebaa5 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sat, 23 Nov 2024 09:17:00 +0000 Subject: [PATCH 22/38] more plumbing (old Scavenge done) --- 
.../Services/Storage/ReadIndexTestScenario.cs | 3 +- .../Helpers/ScavengeLifeCycleScenario.cs | 3 +- .../Helpers/ScavengeTestScenario.cs | 1 + ...venged_tfchunk_with_all_records_removed.cs | 2 +- src/EventStore.Core/ClusterVNode.cs | 1 + .../Services/UserManagement/SystemAccounts.cs | 1 + .../TransactionLog/Chunks/TFChunkScavenger.cs | 31 +++++++++++++------ .../DbAccess/OldScavengeChunkMergerBackend.cs | 1 + 8 files changed, 30 insertions(+), 13 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs index 546d6439f74..8f5cd367618 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs @@ -153,7 +153,8 @@ public override async Task TestFixtureSetUp() { if (_scavenge) { if (_completeLastChunkOnScavenge) Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete(); - _scavenger = new TFChunkScavenger(Serilog.Log.Logger, Db, new FakeTFScavengerLog(), TableIndex, ReadIndex, _logFormat.Metastreams); + _scavenger = new TFChunkScavenger(Serilog.Log.Logger, Db, new FakeTFScavengerLog(), TableIndex, ReadIndex, _logFormat.Metastreams, + ITransactionFileTrackerFactory.NoOp); await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks, scavengeIndex: _scavengeIndex); } diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs index f9a1bfcd1a3..80973948ef1 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs @@ -2,6 +2,7 @@ using System.Threading.Tasks; using EventStore.Core.LogAbstraction; using EventStore.Core.Tests.Services.Storage; +using EventStore.Core.TransactionLog; using 
EventStore.Core.TransactionLog.Chunks; using NUnit.Framework; @@ -42,7 +43,7 @@ public override async Task TestFixtureSetUp() { Log = new FakeTFScavengerLog(); FakeTableIndex = new FakeTableIndex(); TfChunkScavenger = new TFChunkScavenger(Serilog.Log.Logger, _dbResult.Db, Log, FakeTableIndex, new FakeReadIndex(_ => false, _logFormat.Metastreams), - _logFormat.Metastreams); + _logFormat.Metastreams, ITransactionFileTrackerFactory.NoOp); try { await When().WithTimeout(TimeSpan.FromMinutes(1)); diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs index f0e0785800b..63e0242b0e3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs @@ -95,6 +95,7 @@ public override async Task TestFixtureSetUp() { var scavenger = new TFChunkScavenger(Serilog.Log.Logger, _dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex, _logFormat.Metastreams, + ITransactionFileTrackerFactory.NoOp, unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete()); await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs index 8ec706516fa..dde4e161829 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs @@ -83,7 +83,7 @@ public override async Task TestFixtureSetUp() { var scavenger = new TFChunkScavenger(Serilog.Log.Logger, _db, new FakeTFScavengerLog(), new FakeTableIndex(), new FakeReadIndex(x => EqualityComparer.Default.Equals(x, streamId), 
_logFormat.Metastreams), - _logFormat.Metastreams); + _logFormat.Metastreams, ITransactionFileTrackerFactory.NoOp); await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); _scavengedChunk = _db.Manager.GetChunk(0); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 6fab99178b9..49eb7958d3f 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -1380,6 +1380,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { tableIndex: tableIndex, readIndex: readIndex, metastreams: logFormat.SystemStreams, + tfTrackers: trackers.TransactionFileTrackers, unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, threads: message.Threads))); } diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 5b5fbbdbc57..f41801d81f2 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -18,5 +18,6 @@ public class SystemAccounts { public static readonly string SystemRedactionName = "system-redaction"; public static readonly string SystemIndexCommitterName = "system-index-committer"; public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; + public static readonly string SystemScavengeName = "system-scavenge"; } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index f4603c7d5f4..39bb84c071d 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -12,6 +12,7 @@ using EventStore.Core.Index; using EventStore.Core.LogAbstraction; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog.Chunks.TFChunk; using 
EventStore.Core.TransactionLog.LogRecords; using EventStore.Core.TransactionLog.Scavenging; @@ -33,12 +34,15 @@ public class TFChunkScavenger : TFChunkScavenger { private readonly ITableIndex _tableIndex; private readonly IReadIndex _readIndex; private readonly IMetastreamLookup _metastreams; + private readonly ITransactionFileTracker _tfTracker; private readonly long _maxChunkDataSize; private readonly bool _unsafeIgnoreHardDeletes; private readonly int _threads; public TFChunkScavenger(ILogger logger, TFChunkDb db, ITFChunkScavengerLog scavengerLog, ITableIndex tableIndex, - IReadIndex readIndex, IMetastreamLookup metastreams, long? maxChunkDataSize = null, + IReadIndex readIndex, IMetastreamLookup metastreams, + ITransactionFileTrackerFactory tfTrackers, + long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false, int threads = 1) { Ensure.NotNull(logger, nameof(logger)); Ensure.NotNull(db, "db"); @@ -62,6 +66,7 @@ public TFChunkScavenger(ILogger logger, TFChunkDb db, ITFChunkScavengerLog scave _tableIndex = tableIndex; _readIndex = readIndex; _metastreams = metastreams; + _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemScavengeName); _maxChunkDataSize = maxChunkDataSize ?? 
db.Config.ChunkSize; _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes; _threads = threads; @@ -158,6 +163,7 @@ private void ScavengeInternal(bool alwaysKeepScavenged, bool mergeChunks, int st maxChunkDataSize: _maxChunkDataSize, scavengerLog: _scavengerLog, throttle: new Throttle(_logger, TimeSpan.Zero, TimeSpan.Zero, 100), + tracker: _tfTracker, ct: ct); } @@ -204,7 +210,7 @@ private void ScavengeChunk(bool alwaysKeepScavenged, TFChunk.TFChunk oldChunk, } try { - TraverseChunkBasic(oldChunk, ct, + TraverseChunkBasic(oldChunk, ct, _tfTracker, result => { threadLocalCache.Records.Add(result); @@ -337,6 +343,7 @@ public static void MergePhase( long maxChunkDataSize, ITFChunkScavengerLog scavengerLog, Throttle throttle, + ITransactionFileTracker tracker, CancellationToken ct) { bool mergedSomething; @@ -362,6 +369,7 @@ public static void MergePhase( db: db, scavengerLog: scavengerLog, oldChunks: chunksToMerge, + tracker: tracker, ct: ct)) { mergedSomething = true; @@ -382,6 +390,7 @@ public static void MergePhase( db: db, scavengerLog: scavengerLog, oldChunks: chunksToMerge, + tracker: tracker, ct: ct)) { mergedSomething = true; @@ -398,6 +407,7 @@ private static bool MergeChunks( TFChunkDb db, ITFChunkScavengerLog scavengerLog, IList oldChunks, + ITransactionFileTracker tracker, CancellationToken ct) { if (oldChunks.IsEmpty()) throw new ArgumentException("Provided list of chunks to merge is empty."); @@ -444,7 +454,7 @@ private static bool MergeChunks( var positionMapping = new List(); foreach (var oldChunk in oldChunks) { var lastFlushedPage = -1; - TraverseChunkBasic(oldChunk, ct, + TraverseChunkBasic(oldChunk, ct, tracker, result => { positionMapping.Add(WriteRecord(newChunk, result.LogRecord)); @@ -613,7 +623,7 @@ private bool ShouldKeepPrepare( return true; } - var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId, ITransactionFileTracker.NoOp); + var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId, 
_tfTracker); if (lastEventNumber == EventNumber.DeletedStream) { // The stream is hard deleted but this is not the tombstone. // When all prepares and commit of transaction belong to single chunk and the stream is deleted, @@ -671,7 +681,7 @@ private bool ShouldKeepPrepare( return true; } - var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId, ITransactionFileTracker.NoOp); + var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId, _tfTracker); bool canRemove = (meta.MaxCount.HasValue && eventNumber < lastEventNumber - meta.MaxCount.Value + 1) || (meta.TruncateBefore.HasValue && eventNumber < meta.TruncateBefore.Value) || (meta.MaxAge.HasValue && prepare.TimeStamp < DateTime.UtcNow - meta.MaxAge.Value); @@ -686,7 +696,7 @@ private bool ShouldKeepPrepare( } private bool DiscardBecauseDuplicate(IPrepareLogRecord prepare, long eventNumber) { - var result = _readIndex.ReadEvent(IndexReader.UnspecifiedStreamName, prepare.EventStreamId, eventNumber, ITransactionFileTracker.NoOp); + var result = _readIndex.ReadEvent(IndexReader.UnspecifiedStreamName, prepare.EventStreamId, eventNumber, _tfTracker); if (result.Result == ReadEventResult.Success && result.Record.LogPosition != prepare.LogPosition) { // prepare isn't the record we get for an index read at its own stream/version. // therefore it is a duplicate that cannot be read from the index, discard it. 
@@ -701,13 +711,13 @@ private bool IsSoftDeletedTempStreamWithinSameChunk(TStreamId eventStreamId, lon TStreamId msh; if (_metastreams.IsMetaStream(eventStreamId)) { var originalStreamId = _metastreams.OriginalStreamOf(eventStreamId); - var meta = _readIndex.GetStreamMetadata(originalStreamId, ITransactionFileTracker.NoOp); + var meta = _readIndex.GetStreamMetadata(originalStreamId, _tfTracker); if (meta.TruncateBefore != EventNumber.DeletedStream || meta.TempStream != true) return false; sh = originalStreamId; msh = eventStreamId; } else { - var meta = _readIndex.GetStreamMetadata(eventStreamId, ITransactionFileTracker.NoOp); + var meta = _readIndex.GetStreamMetadata(eventStreamId, _tfTracker); if (meta.TruncateBefore != EventNumber.DeletedStream || meta.TempStream != true) return false; sh = eventStreamId; @@ -727,14 +737,15 @@ private bool IsSoftDeletedTempStreamWithinSameChunk(TStreamId eventStreamId, lon } private static void TraverseChunkBasic(TFChunk.TFChunk chunk, CancellationToken ct, + ITransactionFileTracker tracker, Action process) { - var result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); //qq + var result = chunk.TryReadFirst(tracker); while (result.Success) { process(new CandidateRecord(result.LogRecord, result.RecordLength)); ct.ThrowIfCancellationRequested(); - result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); //qq + result = chunk.TryReadClosestForward(result.NextPosition, tracker); } } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs index 28a3dbad62d..5988398f9f8 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs @@ -29,6 +29,7 @@ public void MergeChunks( maxChunkDataSize: _db.Config.ChunkSize, scavengerLog: 
scavengerLogger, throttle: throttle, + tracker: ITransactionFileTracker.NoOp, ct: cancellationToken); } } From 54ee607b13c551e4ec455aa9e5913918efc91959 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sat, 23 Nov 2024 09:27:03 +0000 Subject: [PATCH 23/38] more plumbing (new scavenge done) --- .../Scavenge/Infrastructure/Scenario.cs | 18 +++++++++++------- src/EventStore.Core/ClusterVNode.cs | 17 +++++++++++------ .../DbAccess/ChunkManagerForExecutor.cs | 7 +++++-- .../DbAccess/ChunkReaderForAccumulator.cs | 5 ++++- .../DbAccess/ChunkReaderForExecutor.cs | 9 ++++++--- .../DbAccess/ChunkReaderForIndexExecutor.cs | 6 +++--- .../DbAccess/IndexReaderForAccumulator.cs | 13 +++++++++---- .../DbAccess/IndexReaderForCalculator.cs | 17 ++++++++++------- .../DbAccess/OldScavengeChunkMergerBackend.cs | 6 ++++-- 9 files changed, 63 insertions(+), 35 deletions(-) diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index 30278de71ed..cd18fb822c3 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -247,6 +247,7 @@ private async Task RunInternalAsync( } var hasher = new CompositeHasher(lowHasher, highHasher); + var tracker = ITransactionFileTracker.NoOp; var tableIndex = new TableIndex( directory: indexPath, @@ -254,7 +255,7 @@ private async Task RunInternalAsync( highHasher: highHasher, emptyStreamId: logFormat.EmptyStreamId, memTableFactory: () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 200), - tfReaderFactory: _ => new TFReaderLease(readerPool, ITransactionFileTracker.NoOp), + tfReaderFactory: _ => new TFReaderLease(readerPool, tracker), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: ESConsts.PTableInitialReaderCount, @@ -317,9 +318,10 @@ private async Task RunInternalAsync( metastreamLookup, 
logFormat.StreamIdConverter, dbResult.Db.Config.ReplicationCheckpoint, + tracker, dbConfig.ChunkSize); - var indexReader = new IndexReaderForAccumulator(readIndex); + var indexReader = new IndexReaderForAccumulator(readIndex, tracker); var accumulatorMetastreamLookup = new AdHocMetastreamLookupInterceptor( metastreamLookup, @@ -332,8 +334,9 @@ private async Task RunInternalAsync( var calculatorIndexReader = new AdHocIndexReaderInterceptor( new IndexReaderForCalculator( readIndex, - tracker => new TFReaderLease(readerPool, tracker), - scavengeState.LookupUniqueHashUser), + () => new TFReaderLease(readerPool, tracker), + scavengeState.LookupUniqueHashUser, + tracker), (f, handle, from, maxCount, x) => { if (_calculatingCancellationTrigger != null) if ((handle.Kind == StreamHandle.Kind.Hash && handle.StreamHash == hasher.Hash(_calculatingCancellationTrigger)) || @@ -403,7 +406,8 @@ private async Task RunInternalAsync( new ChunkManagerForExecutor( logger, dbResult.Db.Manager, - dbConfig), + dbConfig, + tracker), Tracer), chunkSize: dbConfig.ChunkSize, unsafeIgnoreHardDeletes: _unsafeIgnoreHardDeletes, @@ -414,13 +418,13 @@ private async Task RunInternalAsync( IChunkMerger chunkMerger = new ChunkMerger( logger: logger, mergeChunks: _mergeChunks, - new OldScavengeChunkMergerBackend(logger, dbResult.Db), + new OldScavengeChunkMergerBackend(logger, dbResult.Db, tracker), throttle: throttle); IIndexExecutor indexExecutor = new IndexExecutor( logger: logger, indexScavenger: cancellationWrappedIndexScavenger, - streamLookup: new ChunkReaderForIndexExecutor(tracker => new TFReaderLease(readerPool, tracker)), + streamLookup: new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool, tracker)), unsafeIgnoreHardDeletes: _unsafeIgnoreHardDeletes, restPeriod: restPeriod, throttle: throttle); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 49eb7958d3f..bd3b341d3a2 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ 
b/src/EventStore.Core/ClusterVNode.cs @@ -66,6 +66,7 @@ using Microsoft.Data.Sqlite; using Mono.Unix.Native; using ILogger = Serilog.ILogger; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core { public abstract class ClusterVNode { @@ -1278,6 +1279,8 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { }, dispose: backend => backend.Dispose()); + var tracker = trackers.TransactionFileTrackers.GetOrAdd(SystemAccounts.SystemScavengeName); + var state = new ScavengeState( logger, longHasher, @@ -1294,8 +1297,9 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { logFormat.Metastreams, logFormat.StreamIdConverter, Db.Config.ReplicationCheckpoint, + tracker, TFConsts.ChunkSize), - index: new IndexReaderForAccumulator(readIndex), + index: new IndexReaderForAccumulator(readIndex, tracker), cancellationCheckPeriod: cancellationCheckPeriod, throttle: throttle); @@ -1303,8 +1307,9 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { logger: logger, new IndexReaderForCalculator( readIndex, - tracker => new TFReaderLease(readerPool, tracker), - state.LookupUniqueHashUser), + () => new TFReaderLease(readerPool, tracker), + state.LookupUniqueHashUser, + tracker), chunkSize: TFConsts.ChunkSize, cancellationCheckPeriod: cancellationCheckPeriod, buffer: calculatorBuffer, @@ -1313,7 +1318,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var chunkExecutor = new ChunkExecutor( logger, logFormat.Metastreams, - new ChunkManagerForExecutor(logger, Db.Manager, Db.Config), + new ChunkManagerForExecutor(logger, Db.Manager, Db.Config, tracker), chunkSize: Db.Config.ChunkSize, unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, cancellationCheckPeriod: cancellationCheckPeriod, @@ -1323,13 +1328,13 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var chunkMerger = new ChunkMerger( logger: logger, mergeChunks: !options.Database.DisableScavengeMerging, - backend: new OldScavengeChunkMergerBackend(logger, db: Db), + backend: new 
OldScavengeChunkMergerBackend(logger, db: Db, tracker: tracker), throttle: throttle); var indexExecutor = new IndexExecutor( logger, new IndexScavenger(tableIndex), - new ChunkReaderForIndexExecutor(tracker => new TFReaderLease(readerPool, tracker)), + new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool, tracker)), unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, restPeriod: 32_768, throttle: throttle); diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs index 6d4120f3110..7440aa15088 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs @@ -9,11 +9,14 @@ public class ChunkManagerForExecutor : IChunkManagerForChunkExecutor< private readonly ILogger _logger; private readonly TFChunkManager _manager; private readonly TFChunkDbConfig _dbConfig; + private readonly ITransactionFileTracker _tracker; - public ChunkManagerForExecutor(ILogger logger, TFChunkManager manager, TFChunkDbConfig dbConfig) { + public ChunkManagerForExecutor(ILogger logger, TFChunkManager manager, TFChunkDbConfig dbConfig, + ITransactionFileTracker tracker) { _logger = logger; _manager = manager; _dbConfig = dbConfig; + _tracker = tracker; } public IChunkWriterForExecutor CreateChunkWriter( @@ -24,7 +27,7 @@ public IChunkWriterForExecutor CreateChunkWriter( public IChunkReaderForExecutor GetChunkReaderFor(long position) { var tfChunk = _manager.GetChunkFor(position); - return new ChunkReaderForExecutor(tfChunk); + return new ChunkReaderForExecutor(tfChunk, _tracker); } public void SwitchChunk( diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs index 84f64eaca5a..85d1b8a10d8 100644 --- 
a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs @@ -14,6 +14,7 @@ public class ChunkReaderForAccumulator : IChunkReaderForAccumulator _metaStreamLookup; private readonly IStreamIdConverter _streamIdConverter; private readonly ICheckpoint _replicationChk; + private readonly ITransactionFileTracker _tracker; private readonly int _chunkSize; private readonly Func _getBuffer; @@ -24,12 +25,14 @@ public ChunkReaderForAccumulator( IMetastreamLookup metastreamLookup, IStreamIdConverter streamIdConverter, ICheckpoint replicationChk, + ITransactionFileTracker tracker, int chunkSize) { _manager = manager; _metaStreamLookup = metastreamLookup; _streamIdConverter = streamIdConverter; _replicationChk = replicationChk; + _tracker = tracker; _chunkSize = chunkSize; var reusableRecordBuffer = new ReusableBuffer(8192); @@ -62,7 +65,7 @@ public IEnumerable ReadChunkInto( var localPos = chunk.ChunkHeader.GetLocalLogPosition(nextPos); - var result = chunk.TryReadClosestForwardRaw(localPos, _getBuffer, ITransactionFileTracker.NoOp); //qq plumb through all occurrences of noop + var result = chunk.TryReadClosestForwardRaw(localPos, _getBuffer, _tracker); if (!result.Success) { // there is no need to release the reusable buffer here since result.Success is false diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs index 28505e7ec8f..88ab21045bb 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs @@ -6,9 +6,12 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class ChunkReaderForExecutor : IChunkReaderForExecutor { private readonly TFChunk _chunk; + private readonly ITransactionFileTracker _tracker; - 
public ChunkReaderForExecutor(TFChunk chunk) { + public ChunkReaderForExecutor(TFChunk chunk, + ITransactionFileTracker tracker) { _chunk = chunk; + _tracker = tracker; } public string Name => _chunk.ToString(); @@ -30,7 +33,7 @@ public IEnumerable ReadInto( RecordForExecutor.NonPrepare nonPrepare, RecordForExecutor.Prepare prepare) { - var result = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); //qq + var result = _chunk.TryReadFirst(_tracker); while (result.Success) { var record = result.LogRecord; if (record.RecordType != LogRecordType.Prepare) { @@ -51,7 +54,7 @@ public IEnumerable ReadInto( yield return true; } - result = _chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); //qq + result = _chunk.TryReadClosestForward(result.NextPosition, _tracker); } } } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs index 22dcbf81bfe..f35c97556b7 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForIndexExecutor.cs @@ -3,14 +3,14 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class ChunkReaderForIndexExecutor : IChunkReaderForIndexExecutor { - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; - public ChunkReaderForIndexExecutor(Func tfReaderFactory) { + public ChunkReaderForIndexExecutor(Func tfReaderFactory) { _tfReaderFactory = tfReaderFactory; } public bool TryGetStreamId(long position, out TStreamId streamId) { - using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _tfReaderFactory()) { var result = reader.TryReadAt(position, couldBeScavenged: true); if (!result.Success) { streamId = default; diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs 
b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs index 5bd0f47c321..d1fedb42ef2 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs @@ -4,9 +4,11 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class IndexReaderForAccumulator : IIndexReaderForAccumulator { private readonly IReadIndex _readIndex; + private readonly ITransactionFileTracker _tracker; - public IndexReaderForAccumulator(IReadIndex readIndex) { + public IndexReaderForAccumulator(IReadIndex readIndex, ITransactionFileTracker tracker) { _readIndex = readIndex; + _tracker = tracker; } // reads a stream forward but only returns event info not the full event. @@ -30,7 +32,8 @@ public IndexReadEventInfoResult ReadEventInfoForward( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position, ITransactionFileTracker.NoOp); + scavengePoint.Position, + _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } @@ -52,14 +55,16 @@ public IndexReadEventInfoResult ReadEventInfoBackward( _ => streamId, fromEventNumber, maxCount, - scavengePoint.Position, ITransactionFileTracker.NoOp); + scavengePoint.Position, + _tracker); case StreamHandle.Kind.Id: // uses log to check for hash collisions return _readIndex.ReadEventInfoBackward_KnownCollisions( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position, ITransactionFileTracker.NoOp); + scavengePoint.Position, + _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs index 6edcf0765fa..655938ddbbe 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs +++ 
b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs @@ -5,17 +5,20 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class IndexReaderForCalculator : IIndexReaderForCalculator { private readonly IReadIndex _readIndex; - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly Func _lookupUniqueHashUser; + private readonly ITransactionFileTracker _tracker; public IndexReaderForCalculator( IReadIndex readIndex, - Func tfReaderFactory, - Func lookupUniqueHashUser) { + Func tfReaderFactory, + Func lookupUniqueHashUser, + ITransactionFileTracker tracker) { _readIndex = readIndex; _tfReaderFactory = tfReaderFactory; _lookupUniqueHashUser = lookupUniqueHashUser; + _tracker = tracker; } public long GetLastEventNumber( @@ -28,12 +31,12 @@ public long GetLastEventNumber( return _readIndex.GetStreamLastEventNumber_NoCollisions( handle.StreamHash, _lookupUniqueHashUser, - scavengePoint.Position, ITransactionFileTracker.NoOp); + scavengePoint.Position, _tracker); case StreamHandle.Kind.Id: // uses the index and the log to fetch the last event number return _readIndex.GetStreamLastEventNumber_KnownCollisions( handle.StreamId, - scavengePoint.Position, ITransactionFileTracker.NoOp); + scavengePoint.Position, _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } @@ -59,14 +62,14 @@ public IndexReadEventInfoResult ReadEventInfoForward( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position, ITransactionFileTracker.NoOp); + scavengePoint.Position, _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } } public bool IsTombstone(long logPosition) { - using (var reader = _tfReaderFactory(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _tfReaderFactory()) { var result = reader.TryReadAt(logPosition, couldBeScavenged: true); if (!result.Success) diff --git 
a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs index 5988398f9f8..b7f5e15eba8 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs @@ -6,10 +6,12 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class OldScavengeChunkMergerBackend : IChunkMergerBackend { private readonly ILogger _logger; private readonly TFChunkDb _db; + private readonly ITransactionFileTracker _tracker; - public OldScavengeChunkMergerBackend(ILogger logger, TFChunkDb db) { + public OldScavengeChunkMergerBackend(ILogger logger, TFChunkDb db, ITransactionFileTracker tracker) { _logger = logger; _db = db; + _tracker = tracker; } public void MergeChunks( @@ -29,7 +31,7 @@ public void MergeChunks( maxChunkDataSize: _db.Config.ChunkSize, scavengerLog: scavengerLogger, throttle: throttle, - tracker: ITransactionFileTracker.NoOp, + tracker: _tracker, ct: cancellationToken); } } From 1707e8f08748e0491f783c6c5dddf4b7fca1e74a Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sat, 23 Nov 2024 10:13:16 +0000 Subject: [PATCH 24/38] more plumbing (subscriptions done) --- .../Services/Transport/Grpc/EnumeratorsTests.cs | 3 +++ src/EventStore.Core/ClusterVNode.cs | 2 +- src/EventStore.Core/ClusterVNodeStartup.cs | 1 + .../Services/SubscriptionsService.cs | 14 ++++++++++---- .../Transport/Grpc/Enumerators.AllSubscription.cs | 5 ++++- .../Grpc/Enumerators.AllSubscriptionFiltered.cs | 5 ++++- .../Services/Transport/Grpc/Streams.Read.cs | 4 ++++ .../Services/Transport/Grpc/Streams.cs | 4 ++++ .../Services/UserManagement/SystemAccounts.cs | 1 + src/EventStore.Core/Telemetry/TelemetryService.cs | 2 +- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 2 +- .../Chunks/TFChunkReaderExistsAtOptimizer.cs | 2 +- 12 files changed, 35 
insertions(+), 10 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs b/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs index 75b61c7438b..e81d39b0d77 100644 --- a/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs +++ b/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs @@ -9,6 +9,7 @@ using EventStore.Core.Services.UserManagement; using EventStore.Core.Tests.Helpers; using EventStore.Core.Tests.TransactionLog; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Transport.Grpc; @@ -159,6 +160,7 @@ public static SubscriptionWrapper CreateAllSubscription( requiresLeader: false, readIndex: new FakeReadIndex(_ => false, null), uuidOption: new ReadReq.Types.Options.Types.UUIDOption(), + tracker: ITransactionFileTracker.NoOp, cancellationToken: CancellationToken.None)); } @@ -179,6 +181,7 @@ public static SubscriptionWrapper CreateAllSubscriptionFiltered()); _mainBus.Subscribe(subscrQueue.WidenFrom()); - var subscription = new SubscriptionsService(_mainQueue, subscrQueue, readIndex); + var subscription = new SubscriptionsService(_mainQueue, subscrQueue, readIndex, trackers.TransactionFileTrackers); subscrBus.Subscribe(subscription); subscrBus.Subscribe(subscription); subscrBus.Subscribe(subscription); diff --git a/src/EventStore.Core/ClusterVNodeStartup.cs b/src/EventStore.Core/ClusterVNodeStartup.cs index 3956d2bf8e1..106a575272c 100644 --- a/src/EventStore.Core/ClusterVNodeStartup.cs +++ b/src/EventStore.Core/ClusterVNodeStartup.cs @@ -181,6 +181,7 @@ public IServiceCollection ConfigureServices(IServiceCollection services) => .AddSingleton(_readIndex) .AddSingleton(new Streams(_mainQueue, _readIndex, _maxAppendSize, _writeTimeout, _expiryStrategy, + _trackers.TransactionFileTrackers, _trackers.GrpcTrackers, _authorizationProvider)) .AddSingleton(new PersistentSubscriptions(_mainQueue, _authorizationProvider)) diff 
--git a/src/EventStore.Core/Services/SubscriptionsService.cs b/src/EventStore.Core/Services/SubscriptionsService.cs index 447a0d1aeb2..9e9e958b16f 100644 --- a/src/EventStore.Core/Services/SubscriptionsService.cs +++ b/src/EventStore.Core/Services/SubscriptionsService.cs @@ -11,6 +11,7 @@ using EventStore.Core.Util; using ILogger = Serilog.ILogger; using EventStore.Core.TransactionLog; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services { public enum SubscriptionDropReason { @@ -57,12 +58,15 @@ public class SubscriptionsService : private readonly IEnvelope _busEnvelope; private readonly IQueuedHandler _queuedHandler; private readonly IReadIndex _readIndex; + private readonly ITransactionFileTrackerFactory _tfTrackers; + private readonly ITransactionFileTracker _tfTracker; private static readonly char[] _linkToSeparator = new[] { '@' }; public SubscriptionsService( IPublisher bus, IQueuedHandler queuedHandler, - IReadIndex readIndex) { + IReadIndex readIndex, + ITransactionFileTrackerFactory tfTrackers) { Ensure.NotNull(bus, "bus"); Ensure.NotNull(queuedHandler, "queuedHandler"); @@ -72,6 +76,8 @@ public SubscriptionsService( _busEnvelope = new PublishEnvelope(bus); _queuedHandler = queuedHandler; _readIndex = readIndex; + _tfTrackers = tfTrackers; + _tfTracker = tfTrackers.For(SystemAccounts.SystemSubscriptionsName); } public void Handle(SystemMessage.SystemStart message) { @@ -121,7 +127,7 @@ public void Handle(ClientMessage.SubscribeToStream msg) { if (isInMemoryStream) { lastEventNumber = -1; } else if (!msg.EventStreamId.IsEmptyString()) { - lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), ITransactionFileTracker.NoOp); + lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), _tfTrackers.For(msg)); } var lastIndexedPos = isInMemoryStream ? 
-1 : _readIndex.LastIndexedPosition; @@ -141,7 +147,7 @@ public void Handle(ClientMessage.FilteredSubscribeToStream msg) { if (isInMemoryStream) { lastEventNumber = -1; } else if (!msg.EventStreamId.IsEmptyString()) { - lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), ITransactionFileTracker.NoOp); + lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), _tfTrackers.For(msg)); } var lastIndexedPos = isInMemoryStream ? -1 : _readIndex.LastIndexedPosition; @@ -343,7 +349,7 @@ private ResolvedEvent ResolveLinkToEvent(EventRecord eventRecord, long commitPos long eventNumber = long.Parse(parts[0]); string streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, _tfTracker); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); diff --git a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs index 6435f8f9b67..61fb8346170 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs @@ -26,6 +26,7 @@ public class AllSubscription : IAsyncEnumerator { private readonly bool _requiresLeader; private readonly IReadIndex _readIndex; private readonly ReadReq.Types.Options.Types.UUIDOption _uuidOption; + private readonly ITransactionFileTracker _tracker; private readonly CancellationToken _cancellationToken; private readonly Channel _channel; private readonly SemaphoreSlim _semaphore; @@ -46,6 +47,7 @@ public AllSubscription(IPublisher bus, bool requiresLeader, IReadIndex readIndex, ReadReq.Types.Options.Types.UUIDOption uuidOption, + 
ITransactionFileTracker tracker, CancellationToken cancellationToken) { if (bus == null) { throw new ArgumentNullException(nameof(bus)); @@ -63,6 +65,7 @@ public AllSubscription(IPublisher bus, _requiresLeader = requiresLeader; _readIndex = readIndex; _uuidOption = uuidOption; + _tracker = tracker; _cancellationToken = cancellationToken; _channel = Channel.CreateBounded(BoundedChannelOptions); _semaphore = new SemaphoreSlim(1, 1); @@ -129,7 +132,7 @@ private void Subscribe(Position? startPosition) { var (commitPosition, preparePosition) = startPosition.Value.ToInt64(); try { var indexResult = - _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, ITransactionFileTracker.NoOp); + _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, _tracker); CatchUp(Position.FromInt64(indexResult.NextPos.CommitPosition, indexResult.NextPos.PreparePosition)); } catch (Exception ex) { diff --git a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs index 18b4c04f7e3..2d1059e98e4 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs @@ -28,6 +28,7 @@ public class AllSubscriptionFiltered : IAsyncEnumerator { private readonly bool _requiresLeader; private readonly IReadIndex _readIndex; private readonly ReadReq.Types.Options.Types.UUIDOption _uuidOption; + private readonly ITransactionFileTracker _tfTracker; private readonly uint _maxSearchWindow; private readonly CancellationToken _cancellationToken; private readonly Channel _channel; @@ -54,6 +55,7 @@ public AllSubscriptionFiltered(IPublisher bus, uint? 
maxSearchWindow, uint checkpointIntervalMultiplier, ReadReq.Types.Options.Types.UUIDOption uuidOption, + ITransactionFileTracker tfTracker, CancellationToken cancellationToken) { if (bus == null) { throw new ArgumentNullException(nameof(bus)); @@ -81,6 +83,7 @@ public AllSubscriptionFiltered(IPublisher bus, _readIndex = readIndex; _maxSearchWindow = maxSearchWindow ?? ReadBatchSize; _uuidOption = uuidOption; + _tfTracker = tfTracker; _cancellationToken = cancellationToken; _subscriptionStarted = 0; _channel = Channel.CreateBounded(BoundedChannelOptions); @@ -152,7 +155,7 @@ private void Subscribe(Position? startPosition) { var (commitPosition, preparePosition) = startPosition.Value.ToInt64(); try { var indexResult = - _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, ITransactionFileTracker.NoOp); + _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, _tfTracker); CatchUp(Position.FromInt64(indexResult.NextPos.CommitPosition, indexResult.NextPos.PreparePosition)); } catch (Exception ex) { diff --git a/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs b/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs index 2fcaaed9753..99d54b8cdea 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs @@ -3,6 +3,7 @@ using System.Linq; using System.Threading.Tasks; using EventStore.Client.Streams; +using EventStore.Core.Messages; using EventStore.Core.Metrics; using EventStore.Core.Services.Storage.ReaderIndex; using Grpc.Core; @@ -29,6 +30,7 @@ public override async Task Read( var compatibility = options.ControlOption?.Compatibility ?? 
0; var user = context.GetHttpContext().User; + var tfTracker = _tfTrackers.For(user); var requiresLeader = GetRequiresLeader(context.RequestHeaders); var op = streamOptionsCase switch { @@ -170,6 +172,7 @@ public override async Task Read( requiresLeader, _readIndex, options.UuidOption, + tfTracker, context.CancellationToken), (StreamOptionOneofCase.All, CountOptionOneofCase.Subscription, @@ -191,6 +194,7 @@ public override async Task Read( }, request.Options.Filter.CheckpointIntervalMultiplier, options.UuidOption, + tfTracker, context.CancellationToken), _ => throw RpcExceptions.InvalidCombination((streamOptionsCase, countOptionsCase, readDirection, filterOptionsCase)) diff --git a/src/EventStore.Core/Services/Transport/Grpc/Streams.cs b/src/EventStore.Core/Services/Transport/Grpc/Streams.cs index a11fb5f5ccb..ea8add97310 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Streams.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Streams.cs @@ -3,6 +3,7 @@ using EventStore.Core.Bus; using EventStore.Core.Metrics; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Plugins.Authorization; namespace EventStore.Core.Services.Transport.Grpc { @@ -12,6 +13,7 @@ internal partial class Streams : EventStore.Client.Streams.Streams.St private readonly int _maxAppendSize; private readonly TimeSpan _writeTimeout; private readonly IExpiryStrategy _expiryStrategy; + private readonly ITransactionFileTrackerFactory _tfTrackers; private readonly IDurationTracker _readTracker; private readonly IDurationTracker _appendTracker; private readonly IDurationTracker _batchAppendTracker; @@ -24,6 +26,7 @@ internal partial class Streams : EventStore.Client.Streams.Streams.St public Streams(IPublisher publisher, IReadIndex readIndex, int maxAppendSize, TimeSpan writeTimeout, IExpiryStrategy expiryStrategy, + ITransactionFileTrackerFactory tfTrackers, GrpcTrackers trackers, IAuthorizationProvider provider) { @@ -33,6 +36,7 @@ 
public Streams(IPublisher publisher, IReadIndex readIndex, int maxApp _maxAppendSize = maxAppendSize; _writeTimeout = writeTimeout; _expiryStrategy = expiryStrategy; + _tfTrackers = tfTrackers; _readTracker = trackers[MetricsConfiguration.GrpcMethod.StreamRead]; _appendTracker = trackers[MetricsConfiguration.GrpcMethod.StreamAppend]; _batchAppendTracker = trackers[MetricsConfiguration.GrpcMethod.StreamBatchAppend]; diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index f41801d81f2..13b9aa44d6f 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -19,5 +19,6 @@ public class SystemAccounts { public static readonly string SystemIndexCommitterName = "system-index-committer"; public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; public static readonly string SystemScavengeName = "system-scavenge"; + public static readonly string SystemSubscriptionsName = "system-subscriptions"; } } diff --git a/src/EventStore.Core/Telemetry/TelemetryService.cs b/src/EventStore.Core/Telemetry/TelemetryService.cs index c0fb4597eb9..4d498848781 100644 --- a/src/EventStore.Core/Telemetry/TelemetryService.cs +++ b/src/EventStore.Core/Telemetry/TelemetryService.cs @@ -179,7 +179,7 @@ private static void OnGossipReceived(IEnvelope envelo private void ReadFirstEpoch() { try { var chunk = _manager.GetChunkFor(0); - var result = chunk.TryReadAt(0, false, ITransactionFileTracker.NoOp); + var result = chunk.TryReadAt(0, false, ITransactionFileTracker.NoOp); // noop ok, immaterial if (!result.Success) return; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index ea790730d05..4ddfbc5a6de 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ 
b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -254,7 +254,7 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) { CreateReaderStreams(); // no need to track reading the header/footer (currently we only track Prepares read anyway) - var reader = GetReaderWorkItem(ITransactionFileTracker.NoOp); + var reader = GetReaderWorkItem(ITransactionFileTracker.NoOp); // noop ok, not reading records try { _chunkHeader = ReadHeader(reader.Stream); Log.Debug("Opened completed {chunk} as version {version}", _filename, _chunkHeader.Version); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs index 5d433c63113..aad15036b53 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs @@ -34,7 +34,7 @@ public TFChunkReaderExistsAtOptimizer(int maxCached) { if (chunk == null) return false; Log.Debug("Optimizing chunk {chunk} for fast merge...", chunk.FileName); - chunk.OptimizeExistsAt(ITransactionFileTracker.NoOp); + chunk.OptimizeExistsAt(ITransactionFileTracker.NoOp); // noop ok, deprecated path return true; }; From a49835c20d0886c723848117ee183defa9f9c0c1 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sat, 23 Nov 2024 11:03:47 +0000 Subject: [PATCH 25/38] more plumbing (index writer done) --- .../Storage/WriteEventsToIndexScenario.cs | 2 +- .../Services/Storage/ReaderIndex/AllReader.cs | 2 +- .../Storage/ReaderIndex/IndexWriter.cs | 26 +++++++++++-------- .../Services/Storage/ReaderIndex/ReadIndex.cs | 2 +- .../Services/UserManagement/SystemAccounts.cs | 4 ++- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 2 +- 6 files changed, 22 insertions(+), 16 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs 
b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs index 5a704c258a2..615a48f7958 100644 --- a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs @@ -148,7 +148,7 @@ public override async Task TestFixtureSetUp() { _streamNames = _logFormat.StreamNames; _systemStreams = _logFormat.SystemStreams; _indexWriter = new IndexWriter(_indexBackend, _indexReader, _streamIds, _streamNames, - _systemStreams, emptyStreamId, _sizer); + _systemStreams, emptyStreamId, ITransactionFileTrackerFactory.NoOp, _sizer); _indexCommitter = new IndexCommitter(_publisher, _indexBackend, _indexReader, _tableIndex, _logFormat.StreamNameIndexConfirmer, _streamNames, _logFormat.EventTypeIndexConfirmer, _logFormat.EventTypes, _systemStreams, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterInitializer, new InMemoryCheckpoint(-1), new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs index 2ab45c4f4de..a3cf4985fb9 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs @@ -209,7 +209,7 @@ private IndexReadAllResult ReadAllEventsBackwardInternal(TFPos pos, int maxCount var consideredEventsCount = 0L; bool firstCommit = true; var reachedEndOfStream = false; - using (var reader = _backend.BorrowReader(tracker)) { //qq + using (var reader = _backend.BorrowReader(tracker)) { long nextCommitPostPos = pos.CommitPosition; while (records.Count < maxCount && consideredEventsCount < maxSearchWindow) { reader.Reposition(nextCommitPostPos); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs index 394e2065c30..f071f2695ab 100644 --- 
a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs @@ -7,6 +7,7 @@ using EventStore.Core.Data; using EventStore.Core.DataStructures; using EventStore.Core.LogAbstraction; +using EventStore.Core.Services.UserManagement; using EventStore.Core.Settings; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; @@ -68,6 +69,7 @@ public long NotCachedTransInfo { private readonly INameLookup _streamNames; private readonly ISystemStreamLookup _systemStreams; private readonly TStreamId _emptyStreamId; + private readonly ITransactionFileTracker _tracker; private readonly IStickyLRUCache> _transactionInfoCache = new StickyLRUCache>(ESConsts.TransactionMetadataCacheCapacity); @@ -94,6 +96,7 @@ public IndexWriter( INameLookup streamNames, ISystemStreamLookup systemStreams, TStreamId emptyStreamId, + ITransactionFileTrackerFactory trackers, ISizer inMemorySizer) { Ensure.NotNull(indexBackend, "indexBackend"); Ensure.NotNull(indexReader, "indexReader"); @@ -110,6 +113,7 @@ public IndexWriter( _streamNames = streamNames; _systemStreams = systemStreams; _emptyStreamId = emptyStreamId; + _tracker = trackers.GetOrAdd(SystemAccounts.SystemWriterName); } public void Reset() { @@ -123,7 +127,7 @@ public void Reset() { public CommitCheckResult CheckCommitStartingAt(long transactionPosition, long commitPosition) { TStreamId streamId; long expectedVersion; - using (var reader = _indexBackend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _indexBackend.BorrowReader(_tracker)) { try { var prepare = GetPrepare(reader, transactionPosition); if (prepare == null) { @@ -213,11 +217,11 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte if(first) /*no data in transaction*/ return new CommitCheckResult(CommitDecision.Ok, streamId, curVersion, -1, -1, IsSoftDeleted(streamId)); else{ - var isReplicated = 
_indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) >= endEventNumber; + var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, _tracker) >= endEventNumber; //TODO(clc): the new index should hold the log positions removing this read //n.b. the index will never have the event in the case of NotReady as it only committed records are indexed //in that case the position will need to come from the pre-index - var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, endEventNumber, ITransactionFileTracker.NoOp); + var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, endEventNumber, _tracker); var logPos = idempotentEvent.Result == ReadEventResult.Success ? idempotentEvent.Record.LogPosition : -1; if(isReplicated) @@ -238,7 +242,7 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte && prepInfo.EventNumber == eventNumber) continue; - var res = _indexReader.ReadPrepare(streamId, eventNumber, ITransactionFileTracker.NoOp); + var res = _indexReader.ReadPrepare(streamId, eventNumber, _tracker); if (res != null && res.EventId == eventId) continue; @@ -257,11 +261,11 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte if(eventNumber == expectedVersion) /* no data in transaction */ return new CommitCheckResult(CommitDecision.WrongExpectedVersion, streamId, curVersion, -1, -1, false); else{ - var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp) >= eventNumber; + var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, _tracker) >= eventNumber; //TODO(clc): the new index should hold the log positions removing this read //n.b. 
the index will never have the event in the case of NotReady as it only committed records are indexed //in that case the position will need to come from the pre-index - var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, eventNumber, ITransactionFileTracker.NoOp); + var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, eventNumber, _tracker); var logPos = idempotentEvent.Result == ReadEventResult.Success ? idempotentEvent.Record.LogPosition : -1; if(isReplicated) @@ -362,7 +366,7 @@ public TransactionInfo GetTransactionInfo(long writerCheckpoint, long private bool GetTransactionInfoUncached(long writerCheckpoint, long transactionId, out TransactionInfo transactionInfo) { - using (var reader = _indexBackend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _indexBackend.BorrowReader(_tracker)) { reader.Reposition(writerCheckpoint); SeqReadResult result; while ((result = reader.TryReadPrev()).Success) { @@ -424,7 +428,7 @@ public void PurgeNotProcessedTransactions(long checkpoint) { } private IEnumerable> GetTransactionPrepares(long transactionPos, long commitPos) { - using (var reader = _indexBackend.BorrowReader(ITransactionFileTracker.NoOp)) { //qq + using (var reader = _indexBackend.BorrowReader(_tracker)) { reader.Reposition(transactionPos); // in case all prepares were scavenged, we should not read past Commit LogPosition @@ -459,7 +463,7 @@ public long GetStreamLastEventNumber(TStreamId streamId) { long lastEventNumber; if (_streamVersions.TryGet(streamId, out lastEventNumber)) return lastEventNumber; - return _indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); + return _indexReader.GetStreamLastEventNumber(streamId, _tracker); } public StreamMetadata GetStreamMetadata(TStreamId streamId) { @@ -472,7 +476,7 @@ public StreamMetadata GetStreamMetadata(TStreamId streamId) { return m; } - return _indexReader.GetStreamMetadata(streamId, 
ITransactionFileTracker.NoOp); + return _indexReader.GetStreamMetadata(streamId, _tracker); } public RawMetaInfo GetStreamRawMeta(TStreamId streamId) { @@ -481,7 +485,7 @@ public RawMetaInfo GetStreamRawMeta(TStreamId streamId) { StreamMeta meta; if (!_streamRawMetas.TryGet(streamId, out meta)) - meta = new StreamMeta(_indexReader.ReadPrepare(metastreamId, metaLastEventNumber, ITransactionFileTracker.NoOp).Data, null); + meta = new StreamMeta(_indexReader.ReadPrepare(metastreamId, metaLastEventNumber, _tracker).Data, null); return new RawMetaInfo(metaLastEventNumber, meta.RawMeta); } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index 51b23280987..f8d01e24371 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -90,7 +90,7 @@ public ReadIndex(IPublisher bus, var eventTypeNames = streamNamesProvider.EventTypes; var streamExistenceFilterInitializer = streamNamesProvider.StreamExistenceFilterInitializer; - _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, sizer); + _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, tfTrackers, sizer); _indexCommitter = new IndexCommitter(bus, indexBackend, _indexReader, tableIndex, streamNameIndex, _streamNames, eventTypeIndex, eventTypeNames, systemStreams, streamExistenceFilter, streamExistenceFilterInitializer, indexCheckpoint, indexStatusTracker, indexTracker, tfTrackers, additionalCommitChecks); diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 13b9aa44d6f..c990570abea 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -11,14 
+11,16 @@ public class SystemAccounts { public static readonly ClaimsPrincipal System = new ClaimsPrincipal(new ClaimsIdentity(Claims, "system")); public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); + //qq risk if we create a claims principle for projections? //qq consider granularity public static readonly string SystemName = "system"; public static readonly string SystemIndexMergeName = "system-index-merge"; public static readonly string SystemIndexScavengeName = "system-index-scavenge"; - public static readonly string SystemRedactionName = "system-redaction"; public static readonly string SystemIndexCommitterName = "system-index-committer"; public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; + public static readonly string SystemRedactionName = "system-redaction"; public static readonly string SystemScavengeName = "system-scavenge"; public static readonly string SystemSubscriptionsName = "system-subscriptions"; + public static readonly string SystemWriterName = "system-writer"; } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 4ddfbc5a6de..e92e408d63f 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -1264,7 +1264,7 @@ private bool TryCreateBulkMemReader(out TFChunkBulkReader reader) { Interlocked.Increment(ref _memStreamCount); var stream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength); - reader = new TFChunkBulkReader(this, stream, isMemory: true); + reader = new TFChunkBulkReader(this, stream, isMemory: true); //qq tracking the bytes here would be nice, under a 'system-replication' return true; } } From d637d4630a1f0a146e101d67ffb8d732ca10e332 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sat, 23 Nov 2024 
11:30:56 +0000 Subject: [PATCH 26/38] pass tracker on each call to TFChunkReader..... it seemed appearing to store the tracker in the reader but it makes more sense to do this in the lease --- .../Fakes/FakeTfReader.cs | 16 ++---- .../Index/FakeIndexReader.cs | 16 ++---- .../table_index_on_try_get_one_value_query.cs | 2 +- ...index_with_corrupt_index_entries_should.cs | 2 +- ...two_ptables_and_memtable_on_range_query.cs | 2 +- ..._hash_collision_when_upgrading_to_64bit.cs | 18 ++----- ...doesnt_exist_drops_entry_and_carries_on.cs | 18 ++----- ...upgrading_index_to_64bit_stream_version.cs | 18 ++----- .../Scavenge/when_scavenging_a_table_index.cs | 2 +- ...x_and_another_table_is_completed_during.cs | 2 +- ..._index_cancelled_while_scavenging_table.cs | 2 +- ..._index_cancelled_while_waiting_for_lock.cs | 2 +- .../when_scavenging_a_table_index_fails.cs | 2 +- .../when_having_TFLog_with_existing_epochs.cs | 5 +- ...aving_an_epoch_manager_and_empty_tf_log.cs | 5 +- .../Services/Storage/FakeInMemoryTFReader.cs | 14 ++--- .../HashCollisions/with_hash_collisions.cs | 18 ++----- .../when_reading_a_single_record.cs | 2 +- ...eading_an_empty_chunked_transaction_log.cs | 6 +-- ...sequentially_reading_db_with_few_chunks.cs | 12 ++--- ..._sequentially_reading_db_with_one_chunk.cs | 14 ++--- ...g_db_with_one_chunk_ending_with_prepare.cs | 2 +- .../when_writing_commit_record_to_file.cs | 2 +- .../when_writing_prepare_record_to_file.cs | 2 +- ...V2StreamExistenceFilterInitializerTests.cs | 4 +- .../LogV3/PartitionManagerTests.cs | 14 ++--- src/EventStore.Core/LogV3/PartitionManager.cs | 2 +- .../Storage/EpochManager/EpochManager.cs | 12 ++--- .../Chunks/TFChunk/ReaderWorkItem.cs | 2 + .../TransactionLog/Chunks/TFChunkChaser.cs | 2 +- .../TransactionLog/Chunks/TFChunkReader.cs | 54 ++++++++----------- .../TransactionLog/ITransactionFileReader.cs | 27 +++++----- 32 files changed, 116 insertions(+), 185 deletions(-) diff --git a/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs 
b/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs index 20e28a15f28..aa097f9c643 100644 --- a/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs +++ b/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs @@ -3,31 +3,23 @@ namespace EventStore.Core.Tests.Fakes { public class FakeTfReader : ITransactionFileReader { - public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); - } - - public void OnReturned() { - throw new NotImplementedException(); - } - public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core.Tests/Index/FakeIndexReader.cs b/src/EventStore.Core.Tests/Index/FakeIndexReader.cs index 841d65a4d05..4360cce3a82 100644 --- a/src/EventStore.Core.Tests/Index/FakeIndexReader.cs +++ b/src/EventStore.Core.Tests/Index/FakeIndexReader.cs @@ -10,34 +10,26 @@ public FakeIndexReader(Func existsAt = null) { _existsAt = existsAt ?? 
(l => true); } - public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); - } - - public void OnReturned() { - throw new NotImplementedException(); - } - public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0, position.ToString(), null, -1, DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null); return new RecordReadResult(true, position + 1, record, 1); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return _existsAt(position); } } diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs index 62188d70438..c4fbc8d4616 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs @@ -33,7 +33,7 @@ public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeTfReader()); + var fakeReader = new TFReaderLease(new FakeTfReader(), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", diff --git 
a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs index c3e08acfd53..49736f58275 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs @@ -22,7 +22,7 @@ public void ConstructTableIndexWithCorruptIndexEntries(byte version, bool skipIn bool createForceVerifyFile = false) { var lowHasher = new XXHashUnsafe(); var highHasher = new Murmur3AUnsafe(); - var fakeReader = new TFReaderLease(new FakeIndexReader()); + var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp); _tableIndex = new TableIndex(PathName, lowHasher, highHasher, "", () => new HashListMemTable(version, maxSize: NumIndexEntries), diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs index f0b49bd2904..3403b7c2302 100644 --- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs +++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs @@ -34,7 +34,7 @@ public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader()); + var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp); _lowHasher = new FakeIndexHasher(); _highHasher = new FakeIndexHasher(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", diff --git a/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs b/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs index 
2a1b33a3c66..e6c42a3c695 100644 --- a/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs +++ b/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs @@ -34,7 +34,7 @@ public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader()); + var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", @@ -146,34 +146,26 @@ public void should_have_entries_in_sorted_order() { } public class FakeIndexReader : ITransactionFileReader { - public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); - } - - public void OnReturned() { - throw new NotImplementedException(); - } - public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0, position % 2 == 0 ? 
"account--696193173" : "LPN-FC002_LPK51001", null, -1, DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null); return new RecordReadResult(true, position + 1, record, 1); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs b/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs index 8240148c152..72271f2907f 100644 --- a/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs +++ b/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs @@ -58,7 +58,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = LogFormatHelper.EmptyStreamId; _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader2()); + var fakeReader = new TFReaderLease(new FakeIndexReader2(), ITransactionFileTracker.NoOp); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV1, maxSize: 3), _ => fakeReader, @@ -135,27 +135,19 @@ public void should_have_all_entries_except_scavenged() { } private class FakeIndexReader2 : ITransactionFileReader { - public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); - } - - public void OnReturned() { - throw new NotImplementedException(); - } - public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public SeqReadResult TryReadPrev() { + public SeqReadResult 
TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { TStreamId streamId; switch (position) { case 1: @@ -178,7 +170,7 @@ public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { return new RecordReadResult(true, position + 1, record, 1); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return position != 2 && position != 1; } } diff --git a/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs b/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs index 3e0f0ced63b..78822478c04 100644 --- a/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs +++ b/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs @@ -25,7 +25,7 @@ public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader()); + var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", @@ -127,34 +127,26 @@ public void should_have_entries_in_sorted_order() { } public class FakeIndexReader : ITransactionFileReader { - public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); - } - - public void OnReturned() { - throw new NotImplementedException(); - } - public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - 
public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0, position % 2 == 0 ? "testStream-2" : "testStream-1", null, -1, DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null); return new RecordReadResult(true, position + 1, record, 1); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs index 922dfa4b89b..96d43259b24 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs @@ -33,7 +33,7 @@ public override async Task TestFixtureSetUp() { _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader(l => !Deleted.Contains(l))); + var fakeReader = new TFReaderLease(new FakeIndexReader(l => !Deleted.Contains(l)), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs index 4302ceae0cf..d9f90b591a7 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs @@ -40,7 +40,7 @@ 
public override async Task TestFixtureSetUp() { if (!scavengeBlocker.Wait(5000)) throw new Exception("Failed to continue."); return false; - })); + }), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs index eaf87fb2002..ec8f282c24f 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs @@ -29,7 +29,7 @@ public override async Task TestFixtureSetUp() { var fakeReader = new TFReaderLease(new FakeIndexReader(l => { cancellationTokenSource.Cancel(); return true; - })); + }), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs index a7a5b97e917..2a35c60e059 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs @@ -24,7 +24,7 @@ public override async Task TestFixtureSetUp() { _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader()); + var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs 
b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs index d0200209d01..98051a90578 100644 --- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs +++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs @@ -31,7 +31,7 @@ public override async Task TestFixtureSetUp() { _indexDir = PathName; - var fakeReader = new TFReaderLease(new FakeIndexReader()); + var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp); _lowHasher = new XXHashUnsafe(); _highHasher = new Murmur3AUnsafe(); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs index 63410d1c3d2..e30951659b5 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs @@ -21,6 +21,7 @@ using EventStore.Common.Utils; using Newtonsoft.Json.Linq; using EventStore.Core.LogV3; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -260,10 +261,10 @@ public void can_add_epochs_to_cache() { Assert.AreEqual(2, epochsWritten.Length); for (int i = 0; i < epochsWritten.Length; i++) { _reader.Reposition(epochsWritten[i].Epoch.EpochPosition); - _reader.TryReadNext(); // read epoch + _reader.TryReadNext(ITransactionFileTracker.NoOp); // read epoch IPrepareLogRecord epochInfo; while (true) { - var result = _reader.TryReadNext(); + var result = _reader.TryReadNext(ITransactionFileTracker.NoOp); Assert.True(result.Success); if (result.LogRecord is IPrepareLogRecord prepare) { epochInfo = prepare; diff --git 
a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs index 65b04bc6a51..f83cea7c3c0 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs @@ -21,6 +21,7 @@ using EventStore.Common.Utils; using EventStore.Core.LogAbstraction; using EventStore.Core.LogV3; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -159,10 +160,10 @@ public void can_write_epochs() { Assert.AreEqual(1 + 4 + 16, epochsWritten.Length); for (int i = 0; i < epochsWritten.Length; i++) { _reader.Reposition(epochsWritten[i].Epoch.EpochPosition); - _reader.TryReadNext(); // read epoch + _reader.TryReadNext(ITransactionFileTracker.NoOp); // read epoch IPrepareLogRecord epochInfo; while (true) { - var result = _reader.TryReadNext(); + var result = _reader.TryReadNext(ITransactionFileTracker.NoOp); Assert.True(result.Success); if (result.LogRecord is IPrepareLogRecord prepare) { epochInfo = prepare; diff --git a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs index bdd97b5b153..d0dd38be63b 100644 --- a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs +++ b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs @@ -15,12 +15,6 @@ public FakeInMemoryTfReader(int recordOffset){ _recordOffset = recordOffset; } - public void OnCheckedOut(ITransactionFileTracker tracker) { - } - - public void OnReturned() { - } - public void AddRecord(ILogRecord record, long position){ _records.Add(position, record); } @@ -29,7 +23,7 @@ public void Reposition(long position) { _curPosition = 
position; } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { NumReads++; if (_records.ContainsKey(_curPosition)){ var pos = _curPosition; @@ -40,11 +34,11 @@ public SeqReadResult TryReadNext() { } } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { NumReads++; if (_records.ContainsKey(position)){ return new RecordReadResult(true, 0, _records[position], 0); @@ -53,7 +47,7 @@ public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { } } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return _records.ContainsKey(position); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs index e53081b4d5c..188b72e5b61 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs @@ -41,7 +41,7 @@ protected virtual void when() { public void Setup() { given(); _indexDir = PathName; - _fakeReader = new TFReaderLease(new FakeReader()); + _fakeReader = new TFReaderLease(new FakeReader(), ITransactionFileTracker.NoOp); _indexBackend = new FakeIndexBackend(_fakeReader); _logFormat = LogFormatHelper.LogFormatFactory.Create(new() { @@ -462,34 +462,26 @@ public EventStore.Core.Data.SystemSettings GetSystemSettings() { } public class FakeReader : ITransactionFileReader { - public void OnCheckedOut(ITransactionFileTracker tracker) { - throw new NotImplementedException(); - } - - public void OnReturned() { - throw new 
NotImplementedException(); - } - public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0, position % 2 == 0 ? "account--696193173" : "LPN-FC002_LPK51001", null, -1, DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null); return new RecordReadResult(true, position + 1, record, 1); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs index 2123e8681c2..1ed9c35252a 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs @@ -86,7 +86,7 @@ public void all_records_can_be_read() { RecordReadResult res; for (var i = 0; i < RecordsCount; i++) { var rec = _records[i]; - res = reader.TryReadAt(rec.LogPosition, couldBeScavenged: true); + res = reader.TryReadAt(rec.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(rec, res.LogRecord); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs index fafd13ba2d5..64f07208f02 100644 --- 
a/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs @@ -20,7 +20,7 @@ public void try_read_returns_false_when_writer_checksum_is_zero() { db.Open(); var reader = new TFChunkReader(db, writerchk, 0); - Assert.IsFalse(reader.TryReadNext().Success); + Assert.IsFalse(reader.TryReadNext(ITransactionFileTracker.NoOp).Success); db.Close(); } @@ -37,7 +37,7 @@ public void try_read_does_not_cache_anything_and_returns_record_once_it_is_writt var reader = new TFChunkReader(db, writerchk, 0); - Assert.IsFalse(reader.TryReadNext().Success); + Assert.IsFalse(reader.TryReadNext(ITransactionFileTracker.NoOp).Success); var recordFactory = LogFormatHelper.RecordFactory; var streamId = LogFormatHelper.StreamId; @@ -48,7 +48,7 @@ public void try_read_does_not_cache_anything_and_returns_record_once_it_is_writt writer.Flush(); writer.Close(); - var res = reader.TryReadNext(); + var res = reader.TryReadNext(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(rec, res.LogRecord); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs index 1952b77c05f..d85999d5046 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs @@ -85,7 +85,7 @@ public void all_records_could_be_read_with_forward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -103,7 +103,7 @@ public void all_records_could_be_read_with_backward_pass() { 
SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -121,7 +121,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { SeqReadResult res; int count1 = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -133,7 +133,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { Assert.AreEqual(RecordsCount, count1); int count2 = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count2 - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -152,7 +152,7 @@ public void records_can_be_read_forward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i + count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -172,7 +172,7 @@ public void records_can_be_read_backward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs 
b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs index 8b632096064..ae4cbe25f57 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs @@ -72,7 +72,7 @@ public void all_records_could_be_read_with_forward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -90,7 +90,7 @@ public void only_the_last_record_is_marked_eof() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { ++count; Assert.AreEqual(count == RecordsCount, res.Eof); } @@ -104,7 +104,7 @@ public void all_records_could_be_read_with_backward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -122,7 +122,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { SeqReadResult res; int count1 = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -134,7 +134,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { Assert.AreEqual(RecordsCount, count1); int count2 = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = 
seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count2 - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -153,7 +153,7 @@ public void records_can_be_read_forward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i + count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -173,7 +173,7 @@ public void records_can_be_read_backward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs index 5f1ed1e4d69..88606866f1b 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs @@ -83,7 +83,7 @@ public void only_the_last_record_is_marked_eof() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { ++count; Assert.AreEqual(count == RecordsCount, res.Eof); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs index 2638cbcd542..baac60c5525 100644 --- 
a/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs @@ -66,7 +66,7 @@ public void the_checksum_is_updated() { [Test] public void trying_to_read_past_writer_checksum_returns_false() { var reader = new TFChunkReader(_db, _writerCheckpoint); - Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true).Success); + Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs index 1772d558e4a..0fc8c5dfd0c 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs @@ -94,7 +94,7 @@ public void the_checksum_is_updated() { [Test] public void trying_to_read_past_writer_checksum_returns_false() { var reader = new TFChunkReader(_db, _writerCheckpoint); - Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true).Success); + Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs index 220cb1aefae..3e2863b7ad3 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs @@ -27,14 +27,14 @@ public LogV2StreamExistenceFilterInitializerTests() { version: PTableVersions.IndexV4, maxSize: 1_000_000 * 2), maxSizeForMemory: 100_000, - tfReaderFactory: tracker => new 
TFReaderLease(_log), + tfReaderFactory: tracker => new TFReaderLease(_log, ITransactionFileTracker.NoOp), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: 5); _tableIndex.Initialize(0); _sut = new LogV2StreamExistenceFilterInitializer( - tfReaderFactory: tracker => new TFReaderLease(_log), + tfReaderFactory: tracker => new TFReaderLease(_log, ITransactionFileTracker.NoOp), tableIndex: _tableIndex); var hasher = new CompositeHasher(new XXHashUnsafe(), new Murmur3AUnsafe()); _filter = new MockExistenceFilter(hasher); diff --git a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs index 38f7e90dc9a..fc762d5c10e 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs @@ -214,17 +214,11 @@ public FakeReader(Guid? rootPartitionId, Guid? rootPartitionTypeId, bool without } } - public void OnCheckedOut(ITransactionFileTracker tracker) { - } - - public void OnReturned() { - } - public void Reposition(long position) { _resultIndex = (int) position; } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { _readCount++; if(_resultIndex < _results.Count) @@ -233,15 +227,15 @@ public SeqReadResult TryReadNext() { return SeqReadResult.Failure; } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core/LogV3/PartitionManager.cs 
b/src/EventStore.Core/LogV3/PartitionManager.cs index 2fcb41b6327..6080c859d7f 100644 --- a/src/EventStore.Core/LogV3/PartitionManager.cs +++ b/src/EventStore.Core/LogV3/PartitionManager.cs @@ -82,7 +82,7 @@ private void EnsureRootPartitionIsWritten() { private void ReadRootPartition() { SeqReadResult result; _reader.Reposition(0); - while ((result = _reader.TryReadNext()).Success) { + while ((result = _reader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = result.LogRecord; switch (rec.RecordType) { case LogRecordType.PartitionType: diff --git a/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs b/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs index f4d204604f5..937d6db4210 100644 --- a/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs +++ b/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs @@ -117,7 +117,7 @@ private void ReadEpochs(int maxEpochCount) { reader.Reposition(_writer.FlushedPosition); SeqReadResult result; - while ((result = reader.TryReadPrev()).Success) { + while ((result = reader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = result.LogRecord; if (rec.RecordType != LogRecordType.System || ((ISystemLogRecord)rec).SystemRecordType != SystemRecordType.Epoch) @@ -147,7 +147,7 @@ private void ReadEpochs(int maxEpochCount) { } } private EpochRecord ReadEpochAt(ITransactionFileReader reader, long epochPos) { - var result = reader.TryReadAt(epochPos, couldBeScavenged: false); + var result = reader.TryReadAt(epochPos, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); if (!result.Success) throw new Exception($"Could not find Epoch record at LogPosition {epochPos}."); if (result.LogRecord.RecordType != LogRecordType.System) @@ -201,7 +201,7 @@ public EpochRecord GetEpochAfter(int epochNumber, bool throwIfNotFound) { try { epoch = firstEpoch; do { - var result = reader.TryReadAt(epoch.PrevEpochPosition, couldBeScavenged: false); + var result = 
reader.TryReadAt(epoch.PrevEpochPosition, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); if (!result.Success) throw new Exception( $"Could not find Epoch record at LogPosition {epoch.PrevEpochPosition}."); @@ -255,7 +255,7 @@ public bool IsCorrectEpochAt(long epochPosition, int epochNumber, Guid epochId) // epochNumber < _minCachedEpochNumber var reader = _readers.Get(); try { - var res = reader.TryReadAt(epochPosition, couldBeScavenged: false); + var res = reader.TryReadAt(epochPosition, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); if (!res.Success || res.LogRecord.RecordType != LogRecordType.System) return false; var sysRec = (ISystemLogRecord)res.LogRecord; @@ -381,13 +381,13 @@ bool TryGetExpectedVersionForEpochInformation(EpochRecord epoch, out long expect reader.Reposition(epoch.PrevEpochPosition); // read the epoch - var result = reader.TryReadNext(); + var result = reader.TryReadNext(ITransactionFileTracker.NoOp); if (!result.Success) return false; // read the epoch-information (if there is one) while (true) { - result = reader.TryReadNext(); + result = reader.TryReadNext(ITransactionFileTracker.NoOp); if (!result.Success) return false; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs index dbdfe194235..bebc0881cbe 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs @@ -17,10 +17,12 @@ public ReaderWorkItem(Stream stream, BinaryReader reader, bool isMemory) { public ITransactionFileTracker Tracker { get; private set; } = ITransactionFileTracker.NoOp; + //qq is this always called? 
public void OnCheckedOut(ITransactionFileTracker tracker) { Tracker = tracker; } + //qq rename, this needs to be called before being returned public void OnReturned() { Tracker = ITransactionFileTracker.NoOp; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs index fcff4c9eff8..d5c7522cc7a 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs @@ -32,7 +32,7 @@ record = res.LogRecord; } public SeqReadResult TryReadNext() { - var res = _reader.TryReadNext(); + var res = _reader.TryReadNext(ITransactionFileTracker.NoOp); if (res.Success) _chaserCheckpoint.Write(res.RecordPostPosition); else diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs index 1d1560337df..c166aac7e77 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs @@ -23,8 +23,6 @@ public long CurrentPosition { private readonly TFChunkReaderExistsAtOptimizer _existsAtOptimizer; private readonly ILogger _log = Log.ForContext(); - private ITransactionFileTracker _tracker = ITransactionFileTracker.NoOp; - public TFChunkReader(TFChunkDb db, IReadOnlyCheckpoint writerCheckpoint, long initialPosition = 0, bool optimizeReadSideCache = false) { Ensure.NotNull(db, "dbConfig"); @@ -40,25 +38,15 @@ public TFChunkReader(TFChunkDb db, IReadOnlyCheckpoint writerCheckpoint, long in _existsAtOptimizer = TFChunkReaderExistsAtOptimizer.Instance; } - //qq are these always called? - //qqqqqq we actually probably dont want to put the tracker in here - public void OnCheckedOut(ITransactionFileTracker tracker) { - _tracker = tracker; - } - - public void OnReturned() { //qq rename, this needs to be called before being returned. 
same for readerworkitem - _tracker = ITransactionFileTracker.NoOp; - } - public void Reposition(long position) { _curPos = position; } - public SeqReadResult TryReadNext() { - return TryReadNextInternal(0); + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { + return TryReadNextInternal(0, tracker); } - private SeqReadResult TryReadNextInternal(int retries) { + private SeqReadResult TryReadNextInternal(int retries, ITransactionFileTracker tracker) { while (true) { var pos = _curPos; var writerChk = _writerCheckpoint.Read(); @@ -68,7 +56,7 @@ private SeqReadResult TryReadNextInternal(int retries) { var chunk = _db.Manager.GetChunkFor(pos); RecordReadResult result; try { - result = chunk.TryReadClosestForward(chunk.ChunkHeader.GetLocalLogPosition(pos), _tracker); + result = chunk.TryReadClosestForward(chunk.ChunkHeader.GetLocalLogPosition(pos), tracker); CountRead(chunk.IsCached); } catch (FileBeingDeletedException) { if (retries > MaxRetries) @@ -76,7 +64,7 @@ private SeqReadResult TryReadNextInternal(int retries) { string.Format( "Got a file that was being deleted {0} times from TFChunkDb, likely a bug there.", MaxRetries)); - return TryReadNextInternal(retries + 1); + return TryReadNextInternal(retries + 1, tracker); } if (result.Success) { @@ -93,11 +81,11 @@ private SeqReadResult TryReadNextInternal(int retries) { } } - public SeqReadResult TryReadPrev() { - return TryReadPrevInternal(0); + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { + return TryReadPrevInternal(0, tracker); } - private SeqReadResult TryReadPrevInternal(int retries) { + private SeqReadResult TryReadPrevInternal(int retries, ITransactionFileTracker tracker) { while (true) { var pos = _curPos; var writerChk = _writerCheckpoint.Read(); @@ -121,15 +109,15 @@ private SeqReadResult TryReadPrevInternal(int retries) { RecordReadResult result; try { result = readLast - ? 
chunk.TryReadLast(_tracker) - : chunk.TryReadClosestBackward(chunk.ChunkHeader.GetLocalLogPosition(pos), _tracker); + ? chunk.TryReadLast(tracker) + : chunk.TryReadClosestBackward(chunk.ChunkHeader.GetLocalLogPosition(pos), tracker); CountRead(chunk.IsCached); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new Exception(string.Format( "Got a file that was being deleted {0} times from TFChunkDb, likely a bug there.", MaxRetries)); - return TryReadPrevInternal(retries + 1); + return TryReadPrevInternal(retries + 1, tracker); } if (result.Success) { @@ -149,11 +137,11 @@ private SeqReadResult TryReadPrevInternal(int retries) { } } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { - return TryReadAtInternal(position, couldBeScavenged, 0); + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { + return TryReadAtInternal(position, couldBeScavenged, 0, tracker); } - private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, int retries) { + private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, int retries, ITransactionFileTracker tracker) { var writerChk = _writerCheckpoint.Read(); if (position >= writerChk) { _log.Warning( @@ -165,20 +153,20 @@ private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, var chunk = _db.Manager.GetChunkFor(position); try { CountRead(chunk.IsCached); - return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged, _tracker); + return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged, tracker); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( "Been told the file was deleted > MaxRetries times. 
Probably a problem in db."); - return TryReadAtInternal(position, couldBeScavenged, retries + 1); + return TryReadAtInternal(position, couldBeScavenged, retries + 1, tracker); } } - public bool ExistsAt(long position) { - return ExistsAtInternal(position, 0); + public bool ExistsAt(long position, ITransactionFileTracker tracker) { + return ExistsAtInternal(position, 0, tracker); } - private bool ExistsAtInternal(long position, int retries) { + private bool ExistsAtInternal(long position, int retries, ITransactionFileTracker tracker) { var writerChk = _writerCheckpoint.Read(); if (position >= writerChk) return false; @@ -188,12 +176,12 @@ private bool ExistsAtInternal(long position, int retries) { CountRead(chunk.IsCached); if (_optimizeReadSideCache) _existsAtOptimizer.Optimize(chunk); - return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position), _tracker); + return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position), tracker); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( "Been told the file was deleted > MaxRetries times. 
Probably a problem in db."); - return ExistsAtInternal(position, retries + 1); + return ExistsAtInternal(position, retries + 1, tracker); } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs index 0ddfca19ef5..5e4d6a8fab2 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs @@ -3,36 +3,35 @@ namespace EventStore.Core.TransactionLog { public interface ITransactionFileReader { - void OnCheckedOut(ITransactionFileTracker tracker); - void OnReturned(); void Reposition(long position); - SeqReadResult TryReadNext(); - SeqReadResult TryReadPrev(); + SeqReadResult TryReadNext(ITransactionFileTracker tracker); + SeqReadResult TryReadPrev(ITransactionFileTracker tracker); - RecordReadResult TryReadAt(long position, bool couldBeScavenged); - bool ExistsAt(long position); + RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker); + bool ExistsAt(long position, ITransactionFileTracker tracker); } public readonly struct TFReaderLease : IDisposable { public readonly ITransactionFileReader Reader; + private readonly ITransactionFileTracker _tracker; private readonly ObjectPool _pool; public TFReaderLease(ObjectPool pool, ITransactionFileTracker tracker) { _pool = pool; + _tracker = tracker; Reader = pool.Get(); - Reader.OnCheckedOut(tracker); } - public TFReaderLease(ITransactionFileReader reader) { + // tests only + public TFReaderLease(ITransactionFileReader reader, ITransactionFileTracker tracker) { _pool = null; + _tracker = tracker; Reader = reader; - //qq what do we want to do about providing/clearing a tracker here? 
} void IDisposable.Dispose() { if (_pool != null) { - Reader.OnReturned(); _pool.Return(Reader); } } @@ -42,19 +41,19 @@ public void Reposition(long position) { } public SeqReadResult TryReadNext() { - return Reader.TryReadNext(); + return Reader.TryReadNext(_tracker); } public SeqReadResult TryReadPrev() { - return Reader.TryReadPrev(); + return Reader.TryReadPrev(_tracker); } public bool ExistsAt(long position) { - return Reader.ExistsAt(position); + return Reader.ExistsAt(position, _tracker); } public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { - return Reader.TryReadAt(position, couldBeScavenged); + return Reader.TryReadAt(position, couldBeScavenged, _tracker); } } } From 8d2cc42e2a093579d71703563f4582ea239792cd Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sun, 24 Nov 2024 07:45:42 +0000 Subject: [PATCH 27/38] more plumbing (epoch manager and telemetry service done) --- ...with_replication_service_and_epoch_manager.cs | 2 ++ .../when_having_TFLog_with_existing_epochs.cs | 1 + ...n_having_an_epoch_manager_and_empty_tf_log.cs | 1 + ...starting_having_TFLog_with_existing_epochs.cs | 3 +++ .../when_starting_having_TFLog_with_no_epochs.cs | 2 ++ .../when_truncating_the_epoch_checkpoint.cs | 2 ++ .../Telemetry/TelemetryServiceTests.cs | 2 ++ src/EventStore.Core/ClusterVNode.cs | 2 ++ .../Storage/EpochManager/EpochManager.cs | 16 ++++++++++------ .../Services/UserManagement/SystemAccounts.cs | 2 ++ .../Telemetry/TelemetryService.cs | 6 +++++- .../TransactionLog/ITransactionFileReader.cs | 1 - 12 files changed, 32 insertions(+), 8 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs index 3ad35d4a4ab..575b04d2b82 100644 --- a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs +++ 
b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs @@ -17,6 +17,7 @@ using EventStore.Core.Tests.Authorization; using EventStore.Core.Tests.Helpers; using EventStore.Core.Tests.Services.Transport.Tcp; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -74,6 +75,7 @@ public override async Task TestFixtureSetUp() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(Db, Db.Config.WriterCheckpoint), writer: Writer), + ITransactionFileTrackerFactory.NoOp, Guid.NewGuid()); Service = new LeaderReplicationService( Publisher, diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs index e30951659b5..9e34d0e823d 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs @@ -57,6 +57,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs index f83cea7c3c0..7dd7b05f560 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs @@ -56,6 +56,7 @@ private EpochManager 
GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs index 1f5e822932f..c9f25b1b2bc 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs @@ -18,6 +18,7 @@ using EventStore.Core.TransactionLog.LogRecords; using System.Threading; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -54,6 +55,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { @@ -129,6 +131,7 @@ public void starting_epoch_manager_with_cache_larger_than_epoch_count_loads_all_ _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); _epochManager.Init(); _cache = GetCache(_epochManager); diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs index 029f3f72171..04ca05062af 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs @@ -18,6 +18,7 @@ using EventStore.Core.TransactionLog.LogRecords; using System.Threading; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -54,6 +55,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs index 05de65b2346..6b760e596d5 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs @@ -8,6 +8,7 @@ using NUnit.Framework; using EventStore.Core.Services.Storage.EpochManager; using EventStore.Core.TransactionLog.LogRecords; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage.EpochManager { public abstract class @@ -61,6 +62,7 @@ public void SetUp() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); _epochManager.Init(); diff --git a/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs b/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs index 09225c3c4df..e38d8bc7842 100644 --- a/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs +++ b/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs @@ -7,6 +7,7 @@ using EventStore.Core.Services.TimerService; using 
EventStore.Core.Telemetry; using EventStore.Core.Tests.TransactionLog; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using Xunit; @@ -33,6 +34,7 @@ public TelemetryServiceTests() { new EnvelopePublisher(new ChannelEnvelope(channel)), _sink, new InMemoryCheckpoint(0), + ITransactionFileTrackerFactory.NoOp, Guid.NewGuid()); } diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 185b5c79a1a..172c034360a 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -712,6 +712,7 @@ TFChunkDbConfig CreateDbConfig( logFormat.StreamNameIndex, logFormat.EventTypeIndex, partitionManager, + ITransactionFileTrackerFactory.NoOp, NodeInfo.InstanceId); epochManager.Init(); @@ -1452,6 +1453,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _mainQueue, new TelemetrySink(options.Application.TelemetryOptout), Db.Config.WriterCheckpoint.AsReadOnly(), + trackers.TransactionFileTrackers, memberInfo.InstanceId); _mainBus.Subscribe(telemetryService); _mainBus.Subscribe(telemetryService); diff --git a/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs b/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs index 937d6db4210..e87143d2f14 100644 --- a/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs +++ b/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs @@ -14,6 +14,7 @@ using EventStore.Core.TransactionLog.LogRecords; using ILogger = Serilog.ILogger; using EventStore.LogCommon; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services.Storage.EpochManager { public abstract class Epochmanager { @@ -30,6 +31,7 @@ public class EpochManager : IEpochManager { private readonly INameIndex _streamNameIndex; private readonly INameIndex _eventTypeIndex; private readonly IPartitionManager _partitionManager; + private readonly ITransactionFileTracker 
_tfTracker; private readonly Guid _instanceId; private readonly object _locker = new object(); @@ -77,6 +79,7 @@ public EpochManager(IPublisher bus, INameIndex streamNameIndex, INameIndex eventTypeIndex, IPartitionManager partitionManager, + ITransactionFileTrackerFactory tfTrackers, Guid instanceId) { Ensure.NotNull(bus, "bus"); Ensure.Nonnegative(cachedEpochCount, "cachedEpochCount"); @@ -99,6 +102,7 @@ public EpochManager(IPublisher bus, _streamNameIndex = streamNameIndex; _eventTypeIndex = eventTypeIndex; _partitionManager = partitionManager; + _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemEpochManagerName); _instanceId = instanceId; } @@ -117,7 +121,7 @@ private void ReadEpochs(int maxEpochCount) { reader.Reposition(_writer.FlushedPosition); SeqReadResult result; - while ((result = reader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { + while ((result = reader.TryReadPrev(_tfTracker)).Success) { var rec = result.LogRecord; if (rec.RecordType != LogRecordType.System || ((ISystemLogRecord)rec).SystemRecordType != SystemRecordType.Epoch) @@ -147,7 +151,7 @@ private void ReadEpochs(int maxEpochCount) { } } private EpochRecord ReadEpochAt(ITransactionFileReader reader, long epochPos) { - var result = reader.TryReadAt(epochPos, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); + var result = reader.TryReadAt(epochPos, couldBeScavenged: false, tracker: _tfTracker); if (!result.Success) throw new Exception($"Could not find Epoch record at LogPosition {epochPos}."); if (result.LogRecord.RecordType != LogRecordType.System) @@ -201,7 +205,7 @@ public EpochRecord GetEpochAfter(int epochNumber, bool throwIfNotFound) { try { epoch = firstEpoch; do { - var result = reader.TryReadAt(epoch.PrevEpochPosition, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); + var result = reader.TryReadAt(epoch.PrevEpochPosition, couldBeScavenged: false, tracker: _tfTracker); if (!result.Success) throw new Exception( $"Could not find Epoch record at 
LogPosition {epoch.PrevEpochPosition}."); @@ -255,7 +259,7 @@ public bool IsCorrectEpochAt(long epochPosition, int epochNumber, Guid epochId) // epochNumber < _minCachedEpochNumber var reader = _readers.Get(); try { - var res = reader.TryReadAt(epochPosition, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); + var res = reader.TryReadAt(epochPosition, couldBeScavenged: false, tracker: _tfTracker); if (!res.Success || res.LogRecord.RecordType != LogRecordType.System) return false; var sysRec = (ISystemLogRecord)res.LogRecord; @@ -381,13 +385,13 @@ bool TryGetExpectedVersionForEpochInformation(EpochRecord epoch, out long expect reader.Reposition(epoch.PrevEpochPosition); // read the epoch - var result = reader.TryReadNext(ITransactionFileTracker.NoOp); + var result = reader.TryReadNext(_tfTracker); if (!result.Success) return false; // read the epoch-information (if there is one) while (true) { - result = reader.TryReadNext(ITransactionFileTracker.NoOp); + result = reader.TryReadNext(_tfTracker); if (!result.Success) return false; diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index c990570abea..8555e0e604e 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -13,6 +13,7 @@ public class SystemAccounts { //qq risk if we create a claims principle for projections? 
//qq consider granularity + public static readonly string SystemEpochManagerName = "system-epoch-manager"; public static readonly string SystemName = "system"; public static readonly string SystemIndexMergeName = "system-index-merge"; public static readonly string SystemIndexScavengeName = "system-index-scavenge"; @@ -21,6 +22,7 @@ public class SystemAccounts { public static readonly string SystemRedactionName = "system-redaction"; public static readonly string SystemScavengeName = "system-scavenge"; public static readonly string SystemSubscriptionsName = "system-subscriptions"; + public static readonly string SystemTelemetryName = "system-telemetry"; public static readonly string SystemWriterName = "system-writer"; } } diff --git a/src/EventStore.Core/Telemetry/TelemetryService.cs b/src/EventStore.Core/Telemetry/TelemetryService.cs index 4d498848781..4efb7c91c8f 100644 --- a/src/EventStore.Core/Telemetry/TelemetryService.cs +++ b/src/EventStore.Core/Telemetry/TelemetryService.cs @@ -9,6 +9,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.TimerService; +using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; @@ -30,6 +31,7 @@ public sealed class TelemetryService : IDisposable, private readonly CancellationTokenSource _cts = new(); private readonly IPublisher _publisher; private readonly IReadOnlyCheckpoint _writerCheckpoint; + private readonly ITransactionFileTracker _tfTracker; private readonly DateTime _startTime = DateTime.UtcNow; private readonly Guid _nodeId; private readonly TFChunkManager _manager; @@ -45,12 +47,14 @@ public TelemetryService( IPublisher publisher, ITelemetrySink sink, IReadOnlyCheckpoint writerCheckpoint, + ITransactionFileTrackerFactory tfTrackers, Guid nodeId) { _manager = manager; _nodeOptions = nodeOptions; _publisher = publisher; _writerCheckpoint = 
writerCheckpoint; + _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemTelemetryName); _nodeId = nodeId; Task.Run(async () => { try { @@ -179,7 +183,7 @@ private static void OnGossipReceived(IEnvelope envelo private void ReadFirstEpoch() { try { var chunk = _manager.GetChunkFor(0); - var result = chunk.TryReadAt(0, false, ITransactionFileTracker.NoOp); // noop ok, immaterial + var result = chunk.TryReadAt(0, false, _tfTracker); if (!result.Success) return; diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs index 5e4d6a8fab2..879cf4b8c52 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs @@ -23,7 +23,6 @@ public TFReaderLease(ObjectPool pool, ITransactionFileTr Reader = pool.Get(); } - // tests only public TFReaderLease(ITransactionFileReader reader, ITransactionFileTracker tracker) { _pool = null; _tracker = tracker; From 4a783c1cd1c58c4d8f52ea9f7d8cfc544197eb4d Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sun, 24 Nov 2024 07:52:37 +0000 Subject: [PATCH 28/38] more plumbing (chaser done) --- .../Storage/Chaser/with_storage_chaser_service.cs | 3 ++- .../when_chasing_a_chunked_transaction_log.cs | 11 ++++++----- .../when_creating_chunked_transaction_chaser.cs | 6 +++--- .../when_writing_commit_record_to_file.cs | 2 +- .../when_writing_prepare_record_to_file.cs | 2 +- src/EventStore.Core/ClusterVNode.cs | 3 ++- .../Services/UserManagement/SystemAccounts.cs | 1 + .../TransactionLog/Chunks/TFChunkChaser.cs | 7 +++++-- 8 files changed, 21 insertions(+), 14 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs b/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs index 327f664a850..059312c3f0e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs @@ -7,6 +7,7 @@ using EventStore.Core.Services.Storage; using EventStore.Core.Services.Storage.EpochManager; using EventStore.Core.Tests.Services.ElectionsService; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -39,7 +40,7 @@ public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); Db = new TFChunkDb(CreateDbConfig()); Db.Open(); - Chaser = new TFChunkChaser(Db, _writerChk, _chaserChk, false); + Chaser = new TFChunkChaser(Db, _writerChk, _chaserChk, false, ITransactionFileTracker.NoOp); Chaser.Open(); Writer = new TFChunkWriter(Db); Writer.Open(); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs b/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs index d1534cbb998..2e12a4ace84 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs @@ -1,6 +1,7 @@ using System; using System.IO; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; @@ -34,7 +35,7 @@ public void try_read_returns_false_when_writer_checkpoint_is_zero() { var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk)); db.Open(); - var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint(), false); + var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint(), false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; @@ -55,7 +56,7 @@ public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_check 
chaserchk.Write(12); chaserchk.Flush(); - var chaser = new TFChunkChaser(db, writerchk, chaserchk, false); + var chaser = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; @@ -102,7 +103,7 @@ public void try_read_returns_record_when_writerchecksum_ahead() { var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk)); db.Open(); - var chaser = new TFChunkChaser(db, writerchk, chaserchk, false); + var chaser = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; @@ -151,7 +152,7 @@ public void try_read_returns_record_when_record_bigger_than_internal_buffer() { writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix()); - var reader = new TFChunkChaser(db, writerchk, chaserchk, false); + var reader = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); reader.Open(); ILogRecord record; @@ -198,7 +199,7 @@ public void try_read_returns_record_when_writerchecksum_equal() { writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix()); - var chaser = new TFChunkChaser(db, writerchk, chaserchk, false); + var chaser = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; diff --git a/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs b/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs index 093f5176c81..224d96af9cb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs @@ -12,19 +12,19 @@ public class when_creating_chunked_transaction_chaser : SpecificationWithDirecto [Test] public void a_null_file_config_throws_argument_null_exception() { Assert.Throws( - () => new TFChunkChaser(null, new InMemoryCheckpoint(0), new 
InMemoryCheckpoint(0), false)); + () => new TFChunkChaser(null, new InMemoryCheckpoint(0), new InMemoryCheckpoint(0), false, ITransactionFileTracker.NoOp)); } [Test] public void a_null_writer_checksum_throws_argument_null_exception() { using var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0)); - Assert.Throws(() => new TFChunkChaser(db, null, new InMemoryCheckpoint(), false)); + Assert.Throws(() => new TFChunkChaser(db, null, new InMemoryCheckpoint(), false, ITransactionFileTracker.NoOp)); } [Test] public void a_null_chaser_checksum_throws_argument_null_exception() { using var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0)); - Assert.Throws(() => new TFChunkChaser(db, new InMemoryCheckpoint(), null, false)); + Assert.Throws(() => new TFChunkChaser(db, new InMemoryCheckpoint(), null, false, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs index baac60c5525..5207c3b66c7 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs @@ -43,7 +43,7 @@ public void Teardown() { [Test] public void the_data_is_written() { - using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) { + using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false, ITransactionFileTracker.NoOp)) { reader.Open(); ILogRecord r; Assert.IsTrue(reader.TryReadNext(out r)); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs index 0fc8c5dfd0c..90562f9834e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs +++ 
b/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs @@ -60,7 +60,7 @@ public void Teardown() { [Test] public void the_data_is_written() { //TODO MAKE THIS ACTUALLY ASSERT OFF THE FILE AND READER FROM KNOWN FILE - using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) { + using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false, ITransactionFileTracker.NoOp)) { reader.Open(); ILogRecord r; Assert.IsTrue(reader.TryReadNext(out r)); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 172c034360a..6399d660e72 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -798,7 +798,8 @@ TFChunkDbConfig CreateDbConfig( Db, Db.Config.WriterCheckpoint.AsReadOnly(), Db.Config.ChaserCheckpoint, - Db.Config.OptimizeReadSideCache); + Db.Config.OptimizeReadSideCache, + trackers.TransactionFileTrackers.For(SystemAccounts.SystemChaserName)); var storageChaser = new StorageChaser( _mainQueue, diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 8555e0e604e..ce28aabe7f9 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -13,6 +13,7 @@ public class SystemAccounts { //qq risk if we create a claims principle for projections? 
//qq consider granularity + public static readonly string SystemChaserName = "system-chaser"; public static readonly string SystemEpochManagerName = "system-epoch-manager"; public static readonly string SystemName = "system"; public static readonly string SystemIndexMergeName = "system-index-merge"; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs index d5c7522cc7a..aeae97b9e14 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs @@ -9,15 +9,18 @@ public ICheckpoint Checkpoint { } private readonly ICheckpoint _chaserCheckpoint; + private readonly ITransactionFileTracker _tfTracker; private readonly TFChunkReader _reader; public TFChunkChaser(TFChunkDb db, IReadOnlyCheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, - bool optimizeReadSideCache) { + bool optimizeReadSideCache, + ITransactionFileTracker tfTracker) { Ensure.NotNull(db, "dbConfig"); Ensure.NotNull(writerCheckpoint, "writerCheckpoint"); Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint"); _chaserCheckpoint = chaserCheckpoint; + _tfTracker = tfTracker; _reader = new TFChunkReader(db, writerCheckpoint, _chaserCheckpoint.Read(), optimizeReadSideCache); } @@ -32,7 +35,7 @@ record = res.LogRecord; } public SeqReadResult TryReadNext() { - var res = _reader.TryReadNext(ITransactionFileTracker.NoOp); + var res = _reader.TryReadNext(_tfTracker); if (res.Success) _chaserCheckpoint.Write(res.RecordPostPosition); else From d1e88430a7604d2108eb86fa258cdfc0ad4f3042 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Sun, 24 Nov 2024 08:02:42 +0000 Subject: [PATCH 29/38] consistency --- .../RedactionService/RedactionServiceTestFixture.cs | 2 +- .../Services/Storage/ReadIndexTestScenario.cs | 2 +- .../Services/Storage/WriteEventsToIndexScenario.cs | 4 ++-- .../Scavenging/Helpers/ScavengeLifeCycleScenario.cs | 2 +- 
.../Scavenging/Helpers/ScavengeTestScenario.cs | 2 +- ...ing_scavenged_tfchunk_with_all_records_removed.cs | 2 +- .../Telemetry/TelemetryServiceTests.cs | 2 +- src/EventStore.Core/ClusterVNode.cs | 12 ++++++------ src/EventStore.Core/Services/RedactionService.cs | 11 +++++------ .../Services/Storage/ReaderIndex/IndexCommitter.cs | 5 ++--- .../Services/Storage/ReaderIndex/IndexWriter.cs | 5 ++--- .../Services/Storage/ReaderIndex/ReadIndex.cs | 5 +++-- src/EventStore.Core/Telemetry/TelemetryService.cs | 5 ++--- .../TransactionLog/Chunks/TFChunkScavenger.cs | 5 ++--- 14 files changed, 30 insertions(+), 34 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs b/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs index dc8b81f2e69..0a6acd816be 100644 --- a/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs +++ b/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs @@ -18,7 +18,7 @@ public RedactionServiceTestFixture() : base(chunkSize: 1024) { } public virtual Task SetUp() { _switchChunksLock = new SemaphoreSlimLock(); RedactionService = new RedactionService(new FakeQueuedHandler(), Db, ReadIndex, _switchChunksLock, - ITransactionFileTrackerFactory.NoOp); + ITransactionFileTracker.NoOp); return Task.CompletedTask; } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs index 8f5cd367618..cd4dc9f7e7c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs @@ -154,7 +154,7 @@ public override async Task TestFixtureSetUp() { if (_completeLastChunkOnScavenge) Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete(); _scavenger = new TFChunkScavenger(Serilog.Log.Logger, Db, new FakeTFScavengerLog(), TableIndex, ReadIndex, 
_logFormat.Metastreams, - ITransactionFileTrackerFactory.NoOp); + ITransactionFileTracker.NoOp); await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks, scavengeIndex: _scavengeIndex); } diff --git a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs index 615a48f7958..977888cde96 100644 --- a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs @@ -148,11 +148,11 @@ public override async Task TestFixtureSetUp() { _streamNames = _logFormat.StreamNames; _systemStreams = _logFormat.SystemStreams; _indexWriter = new IndexWriter(_indexBackend, _indexReader, _streamIds, _streamNames, - _systemStreams, emptyStreamId, ITransactionFileTrackerFactory.NoOp, _sizer); + _systemStreams, emptyStreamId, ITransactionFileTracker.NoOp, _sizer); _indexCommitter = new IndexCommitter(_publisher, _indexBackend, _indexReader, _tableIndex, _logFormat.StreamNameIndexConfirmer, _streamNames, _logFormat.EventTypeIndexConfirmer, _logFormat.EventTypes, _systemStreams, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterInitializer, new InMemoryCheckpoint(-1), new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), - ITransactionFileTrackerFactory.NoOp, false); + ITransactionFileTracker.NoOp, false); WriteEvents(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs index 80973948ef1..346dbc29714 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs @@ -43,7 +43,7 @@ public override async Task TestFixtureSetUp() { Log = new FakeTFScavengerLog(); FakeTableIndex = new FakeTableIndex(); 
TfChunkScavenger = new TFChunkScavenger(Serilog.Log.Logger, _dbResult.Db, Log, FakeTableIndex, new FakeReadIndex(_ => false, _logFormat.Metastreams), - _logFormat.Metastreams, ITransactionFileTrackerFactory.NoOp); + _logFormat.Metastreams, ITransactionFileTracker.NoOp); try { await When().WithTimeout(TimeSpan.FromMinutes(1)); diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs index 63e0242b0e3..f9b5d524c6e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs @@ -95,7 +95,7 @@ public override async Task TestFixtureSetUp() { var scavenger = new TFChunkScavenger(Serilog.Log.Logger, _dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex, _logFormat.Metastreams, - ITransactionFileTrackerFactory.NoOp, + ITransactionFileTracker.NoOp, unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete()); await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs index dde4e161829..90e3340d63f 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs @@ -83,7 +83,7 @@ public override async Task TestFixtureSetUp() { var scavenger = new TFChunkScavenger(Serilog.Log.Logger, _db, new FakeTFScavengerLog(), new FakeTableIndex(), new FakeReadIndex(x => EqualityComparer.Default.Equals(x, streamId), _logFormat.Metastreams), - _logFormat.Metastreams, ITransactionFileTrackerFactory.NoOp); + _logFormat.Metastreams, ITransactionFileTracker.NoOp); await 
scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); _scavengedChunk = _db.Manager.GetChunk(0); diff --git a/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs b/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs index e38d8bc7842..75eefc1a5ca 100644 --- a/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs +++ b/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs @@ -34,7 +34,7 @@ public TelemetryServiceTests() { new EnvelopePublisher(new ChannelEnvelope(channel)), _sink, new InMemoryCheckpoint(0), - ITransactionFileTrackerFactory.NoOp, + ITransactionFileTracker.NoOp, Guid.NewGuid()); } diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 6399d660e72..82df116cf69 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -598,7 +598,7 @@ TFChunkDbConfig CreateDbConfig( StreamExistenceFilterSize = options.Database.StreamExistenceFilterSize, StreamExistenceFilterCheckpoint = Db.Config.StreamExistenceFilterCheckpoint, TFReaderLeaseFactory = username => { - var tracker = trackers.TransactionFileTrackers.GetOrAdd(username); + var tracker = trackers.TransactionFileTrackers.For(username); return new TFReaderLease(readerPool, tracker); } }); @@ -647,7 +647,7 @@ TFChunkDbConfig CreateDbConfig( () => new HashListMemTable(options.IndexBitnessVersion, maxSize: options.Database.MaxMemTableSize * 2), username => { - var tracker = trackers.TransactionFileTrackers.GetOrAdd(username); + var tracker = trackers.TransactionFileTrackers.For(username); return new TFReaderLease(readerPool, tracker); }, options.IndexBitnessVersion, @@ -1281,7 +1281,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { }, dispose: backend => backend.Dispose()); - var tracker = trackers.TransactionFileTrackers.GetOrAdd(SystemAccounts.SystemScavengeName); + var tracker = trackers.TransactionFileTrackers.For(SystemAccounts.SystemScavengeName); var state = new 
ScavengeState( logger, @@ -1387,7 +1387,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { tableIndex: tableIndex, readIndex: readIndex, metastreams: logFormat.SystemStreams, - tfTrackers: trackers.TransactionFileTrackers, + tfTracker: trackers.TransactionFileTrackers.For(SystemAccounts.SystemScavengeName), unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, threads: message.Threads))); } @@ -1421,7 +1421,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _mainBus.Subscribe(redactionQueue.WidenFrom()); var redactionService = new RedactionService(redactionQueue, Db, _readIndex, _switchChunksLock, - trackers.TransactionFileTrackers); + trackers.TransactionFileTrackers.For(SystemAccounts.SystemRedactionName)); redactionBus.Subscribe(redactionService); redactionBus.Subscribe(redactionService); redactionBus.Subscribe(redactionService); @@ -1454,7 +1454,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _mainQueue, new TelemetrySink(options.Application.TelemetryOptout), Db.Config.WriterCheckpoint.AsReadOnly(), - trackers.TransactionFileTrackers, + trackers.TransactionFileTrackers.For(SystemAccounts.SystemTelemetryName), memberInfo.InstanceId); _mainBus.Subscribe(telemetryService); _mainBus.Subscribe(telemetryService); diff --git a/src/EventStore.Core/Services/RedactionService.cs b/src/EventStore.Core/Services/RedactionService.cs index 48be23ff766..e285d089954 100644 --- a/src/EventStore.Core/Services/RedactionService.cs +++ b/src/EventStore.Core/Services/RedactionService.cs @@ -7,7 +7,6 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; -using EventStore.Core.Services.UserManagement; using EventStore.Core.Synchronization; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; @@ -31,7 +30,7 @@ public class RedactionService : private readonly TFChunkDb _db; private readonly IReadIndex _readIndex; private readonly SemaphoreSlimLock _switchChunksLock; - private 
readonly ITransactionFileTracker _tracker; + private readonly ITransactionFileTracker _tfTracker; private const string NewChunkFileExtension = ".tmp"; @@ -40,7 +39,7 @@ public RedactionService( TFChunkDb db, IReadIndex readIndex, SemaphoreSlimLock switchChunksLock, - ITransactionFileTrackerFactory trackers) { + ITransactionFileTracker tfTracker) { Ensure.NotNull(queuedHandler, nameof(queuedHandler)); Ensure.NotNull(db, nameof(db)); Ensure.NotNull(readIndex, nameof(readIndex)); @@ -50,7 +49,7 @@ public RedactionService( _db = db; _readIndex = readIndex; _switchChunksLock = switchChunksLock; - _tracker = trackers.GetOrAdd(SystemAccounts.SystemRedactionName); + _tfTracker = tfTracker; } public void Handle(RedactionMessage.GetEventPosition message) { @@ -66,7 +65,7 @@ public void Handle(RedactionMessage.GetEventPosition message) { private void GetEventPosition(string streamName, long eventNumber, IEnvelope envelope) { var streamId = _readIndex.GetStreamId(streamName); - var result = _readIndex.ReadEventInfo_KeepDuplicates(streamId, eventNumber, _tracker); + var result = _readIndex.ReadEventInfo_KeepDuplicates(streamId, eventNumber, _tfTracker); var eventPositions = new EventPosition[result.EventInfos.Length]; @@ -75,7 +74,7 @@ private void GetEventPosition(string streamName, long eventNumber, IEnvelope env var logPos = eventInfo.LogPosition; var chunk = _db.Manager.GetChunkFor(logPos); var localPosition = chunk.ChunkHeader.GetLocalLogPosition(logPos); - var chunkEventOffset = chunk.GetActualRawPosition(localPosition, _tracker); + var chunkEventOffset = chunk.GetActualRawPosition(localPosition, _tfTracker); // all the events returned by ReadEventInfo_KeepDuplicates() must exist in the log // since the log record was read from the chunk to check for hash collisions. 
diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs index 13002475cd0..ce89bca2868 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs @@ -13,7 +13,6 @@ using EventStore.Core.TransactionLog.LogRecords; using ILogger = Serilog.ILogger; using EventStore.LogCommon; -using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services.Storage.ReaderIndex { public interface IIndexCommitter { @@ -72,7 +71,7 @@ public IndexCommitter( ICheckpoint indexChk, IIndexStatusTracker statusTracker, IIndexTracker tracker, - ITransactionFileTrackerFactory tfTrackers, + ITransactionFileTracker tfTracker, bool additionalCommitChecks) { _bus = bus; _backend = backend; @@ -89,7 +88,7 @@ public IndexCommitter( _additionalCommitChecks = additionalCommitChecks; _statusTracker = statusTracker; _tracker = tracker; - _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemIndexCommitterName); + _tfTracker = tfTracker; } public void Init(long buildToPosition) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs index f071f2695ab..c4cd728120a 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs @@ -7,7 +7,6 @@ using EventStore.Core.Data; using EventStore.Core.DataStructures; using EventStore.Core.LogAbstraction; -using EventStore.Core.Services.UserManagement; using EventStore.Core.Settings; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; @@ -96,7 +95,7 @@ public IndexWriter( INameLookup streamNames, ISystemStreamLookup systemStreams, TStreamId emptyStreamId, - ITransactionFileTrackerFactory trackers, + ITransactionFileTracker tfTracker, ISizer inMemorySizer) { 
Ensure.NotNull(indexBackend, "indexBackend"); Ensure.NotNull(indexReader, "indexReader"); @@ -113,7 +112,7 @@ public IndexWriter( _streamNames = streamNames; _systemStreams = systemStreams; _emptyStreamId = emptyStreamId; - _tracker = trackers.GetOrAdd(SystemAccounts.SystemWriterName); + _tracker = tfTracker; } public void Reset() { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index f8d01e24371..d603647876a 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -10,6 +10,7 @@ using EventStore.Core.LogAbstraction; using EventStore.Core.Messages; using EventStore.Core.Metrics; +using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; @@ -90,10 +91,10 @@ public ReadIndex(IPublisher bus, var eventTypeNames = streamNamesProvider.EventTypes; var streamExistenceFilterInitializer = streamNamesProvider.StreamExistenceFilterInitializer; - _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, tfTrackers, sizer); + _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, tfTrackers.For(SystemAccounts.SystemWriterName), sizer); _indexCommitter = new IndexCommitter(bus, indexBackend, _indexReader, tableIndex, streamNameIndex, _streamNames, eventTypeIndex, eventTypeNames, systemStreams, streamExistenceFilter, - streamExistenceFilterInitializer, indexCheckpoint, indexStatusTracker, indexTracker, tfTrackers, additionalCommitChecks); + streamExistenceFilterInitializer, indexCheckpoint, indexStatusTracker, indexTracker, tfTrackers.For(SystemAccounts.SystemIndexCommitterName), additionalCommitChecks); _allReader = new AllReader(indexBackend, _indexCommitter, 
_streamNames, eventTypeNames); RegisterHitsMisses(cacheTracker); diff --git a/src/EventStore.Core/Telemetry/TelemetryService.cs b/src/EventStore.Core/Telemetry/TelemetryService.cs index 4efb7c91c8f..6daf27aa771 100644 --- a/src/EventStore.Core/Telemetry/TelemetryService.cs +++ b/src/EventStore.Core/Telemetry/TelemetryService.cs @@ -9,7 +9,6 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.TimerService; -using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; @@ -47,14 +46,14 @@ public TelemetryService( IPublisher publisher, ITelemetrySink sink, IReadOnlyCheckpoint writerCheckpoint, - ITransactionFileTrackerFactory tfTrackers, + ITransactionFileTracker tfTracker, Guid nodeId) { _manager = manager; _nodeOptions = nodeOptions; _publisher = publisher; _writerCheckpoint = writerCheckpoint; - _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemTelemetryName); + _tfTracker = tfTracker; _nodeId = nodeId; Task.Run(async () => { try { diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index 39bb84c071d..c8edc515337 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -12,7 +12,6 @@ using EventStore.Core.Index; using EventStore.Core.LogAbstraction; using EventStore.Core.Services.Storage.ReaderIndex; -using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using EventStore.Core.TransactionLog.Scavenging; @@ -41,7 +40,7 @@ public class TFChunkScavenger : TFChunkScavenger { public TFChunkScavenger(ILogger logger, TFChunkDb db, ITFChunkScavengerLog scavengerLog, ITableIndex tableIndex, IReadIndex readIndex, IMetastreamLookup metastreams, - 
ITransactionFileTrackerFactory tfTrackers, + ITransactionFileTracker tfTracker, long? maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false, int threads = 1) { Ensure.NotNull(logger, nameof(logger)); @@ -66,7 +65,7 @@ public TFChunkScavenger(ILogger logger, TFChunkDb db, ITFChunkScavengerLog scave _tableIndex = tableIndex; _readIndex = readIndex; _metastreams = metastreams; - _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemScavengeName); + _tfTracker = tfTracker; _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize; _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes; _threads = threads; From 5f447750160d2f0c7453585c65f7304902e54ec2 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Mon, 25 Nov 2024 09:44:15 +0000 Subject: [PATCH 30/38] no need to separate index-scavenge from scavenge --- src/EventStore.Core/Index/TableIndex.cs | 2 +- .../Services/UserManagement/SystemAccounts.cs | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/EventStore.Core/Index/TableIndex.cs b/src/EventStore.Core/Index/TableIndex.cs index f5a9eba8b8e..b214a4f85eb 100644 --- a/src/EventStore.Core/Index/TableIndex.cs +++ b/src/EventStore.Core/Index/TableIndex.cs @@ -465,7 +465,7 @@ private void ScavengeInternal( try { ct.ThrowIfCancellationRequested(); - using (var reader = _tfReaderFactory(SystemAccounts.SystemIndexScavengeName)) { + using (var reader = _tfReaderFactory(SystemAccounts.SystemScavengeName)) { var indexmapFile = Path.Combine(_directory, IndexMapFilename); Func existsAt = entry => reader.ExistsAt(entry.Position); diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index ce28aabe7f9..26a7fdb97bc 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -4,20 +4,17 @@ namespace EventStore.Core.Services.UserManagement { public class SystemAccounts { private 
static readonly IReadOnlyList Claims = new[] { - new Claim(ClaimTypes.Name, "system"), - new Claim(ClaimTypes.Role, "system"), - new Claim(ClaimTypes.Role, SystemRoles.Admins), + new Claim(ClaimTypes.Name, "system"), + new Claim(ClaimTypes.Role, "system"), + new Claim(ClaimTypes.Role, SystemRoles.Admins), }; public static readonly ClaimsPrincipal System = new ClaimsPrincipal(new ClaimsIdentity(Claims, "system")); public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); - //qq risk if we create a claims principle for projections? - //qq consider granularity public static readonly string SystemChaserName = "system-chaser"; public static readonly string SystemEpochManagerName = "system-epoch-manager"; public static readonly string SystemName = "system"; public static readonly string SystemIndexMergeName = "system-index-merge"; - public static readonly string SystemIndexScavengeName = "system-index-scavenge"; public static readonly string SystemIndexCommitterName = "system-index-committer"; public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; public static readonly string SystemRedactionName = "system-redaction"; From f2e763cb9e0e3bb88f4bd08ce3dc6d08ceacba50 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Mon, 25 Nov 2024 10:32:14 +0000 Subject: [PATCH 31/38] track replication reads --- .../SwitchChunkFailureTests.cs | 3 +- .../with_replication_service.cs | 2 + ...h_replication_service_and_epoch_manager.cs | 1 + .../LogReplication/LogReplicationFixture.cs | 2 + .../LogReplicationWithExistingDbFixture.cs | 4 +- ...tfchunkreader_existsat_optimizer_should.cs | 3 +- .../Scavenging/scavenged_chunk.cs | 5 +- .../TransactionLog/TFChunkHelper.cs | 3 +- .../Validation/when_validating_tfchunk_db.cs | 3 +- .../when_closing_the_database.cs | 3 +- ...hen_destroying_a_tfchunk_that_is_locked.cs | 3 +- ...chunk_that_has_been_locked_and_unlocked.cs 
| 3 +- .../when_opening_existing_tfchunk.cs | 4 +- ..._opening_tfchunk_from_non_existing_file.cs | 3 +- ..._reading_cached_empty_scavenged_tfchunk.cs | 2 +- .../when_reading_from_a_cached_tfchunk.cs | 4 +- ...reading_logical_bytes_bulk_from_a_chunk.cs | 15 ++--- ...eading_physical_bytes_bulk_from_a_chunk.cs | 9 +-- .../when_uncaching_a_tfchunk.cs | 4 +- ...chunk_that_has_been_marked_for_deletion.cs | 3 +- ...actionFileTrackerFactoryExtensionsTests.cs | 3 + .../Scavenge/Infrastructure/Scenario.cs | 3 +- src/EventStore.Core/ClusterVNode.cs | 3 +- .../Services/RedactionService.cs | 3 +- .../Replication/LeaderReplicationService.cs | 7 ++- .../Services/UserManagement/SystemAccounts.cs | 1 + .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 63 ++++++++++--------- .../Chunks/TFChunkBulkReader.cs | 15 ++++- .../TransactionLog/Chunks/TFChunkDb.cs | 23 ++++--- .../TransactionLog/Chunks/TFChunkManager.cs | 17 +++-- .../TransactionLog/Chunks/TFChunkScavenger.cs | 6 +- .../Chunks/TransactionFileTracker.cs | 7 ++- .../TransactionLog/ITransactionFileTracker.cs | 5 +- .../DbAccess/ChunkManagerForExecutor.cs | 2 +- .../DbAccess/ChunkWriterForExecutor.cs | 6 +- 35 files changed, 155 insertions(+), 88 deletions(-) diff --git a/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs b/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs index 77c8ff4b8f7..633970a61cf 100644 --- a/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs +++ b/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs @@ -2,6 +2,7 @@ using System.IO; using System.Threading.Tasks; using EventStore.Core.Data.Redaction; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -131,7 +132,7 @@ public async Task cannot_switch_with_chunk_having_mismatched_range() { newChunk = 
$"{nameof(cannot_switch_with_chunk_having_mismatched_range)}-chunk-0-2.tmp"; var chunkHeader = new ChunkHeader(1, 1024, 0, 2, true, Guid.NewGuid()); - var chunk = TFChunk.CreateWithHeader(Path.Combine(PathName, newChunk), chunkHeader, 1024, false, false, false, 1, 1, false); + var chunk = TFChunk.CreateWithHeader(Path.Combine(PathName, newChunk), chunkHeader, 1024, false, false, false, 1, 1, false, ITransactionFileTracker.NoOp); chunk.Dispose(); msg = await SwitchChunk(GetChunk(0, 0), newChunk); Assert.AreEqual(SwitchChunkResult.ChunkRangeDoesNotMatch, msg.Result); diff --git a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs index 661c67ebcf9..5861801010f 100644 --- a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs +++ b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs @@ -14,6 +14,7 @@ using EventStore.Core.Tests.Helpers; using EventStore.Core.Tests.Services.ElectionsService; using EventStore.Core.Tests.Services.Transport.Tcp; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -68,6 +69,7 @@ public override async Task TestFixtureSetUp() { epochManager: new FakeEpochManager(), clusterSize: ClusterSize, unsafeAllowSurplusNodes: false, + tfTracker: ITransactionFileTracker.NoOp, queueStatsManager: new QueueStatsManager()); Service.Handle(new SystemMessage.SystemStart()); diff --git a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs index 575b04d2b82..b6569c6bb2a 100644 --- 
a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs +++ b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs @@ -85,6 +85,7 @@ public override async Task TestFixtureSetUp() { EpochManager, ClusterSize, false, + ITransactionFileTracker.NoOp, new QueueStatsManager()); Service.Handle(new SystemMessage.SystemStart()); diff --git a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs index a4e0728aeb7..3a0e8b6d91f 100644 --- a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs +++ b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs @@ -20,6 +20,7 @@ using EventStore.Core.Tests.Services.Storage; using EventStore.Core.Tests.Services.Storage.ReadIndex; using EventStore.Core.Time; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -149,6 +150,7 @@ private LeaderInfo CreateLeader(TFChunkDb db) { epochManager: epochManager, clusterSize: ClusterSize, unsafeAllowSurplusNodes: false, + tfTracker: ITransactionFileTracker.NoOp, queueStatsManager: new QueueStatsManager()); var tcpSendService = new TcpSendService(); diff --git a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs index 93e8ba66161..61b8ef86179 100644 --- a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs +++ b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using 
System.Threading.Tasks; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -46,7 +47,8 @@ protected static Task CreateChunk(TFChunkDb db, bool raw, bool complete, int chu writethrough: db.Config.WriteThrough, initialReaderCount: db.Config.InitialReaderCount, maxReaderCount: db.Config.MaxReaderCount, - reduceFileCachePressure: db.Config.ReduceFileCachePressure); + reduceFileCachePressure: db.Config.ReduceFileCachePressure, + tracker: ITransactionFileTracker.NoOp); var posMaps = new List(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs index f7cf85cead9..0b878cac3fb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs @@ -92,7 +92,8 @@ private TFChunk CreateChunk(int chunkNumber, bool scavenged, out List po chunkNumber, chunkNumber, scavenged, false, false, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - false); + false, + ITransactionFileTracker.NoOp); long offset = chunkNumber * 1024 * 1024; long logPos = 0 + offset; for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) { diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs index 4e4b57707f6..bd5bde82810 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs @@ -15,7 +15,8 @@ public void is_fully_resident_in_memory_when_cached() { var chunk = 
TFChunk.CreateNew(Filename, 1024 * 1024, 0, 0, true, false, false, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - false); + false, + ITransactionFileTracker.NoOp); long logPos = 0; for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) { map.Add(new PosMap(logPos, (int)logPos)); @@ -26,7 +27,7 @@ public void is_fully_resident_in_memory_when_cached() { chunk.CompleteScavenge(map); - chunk.CacheInMemory(); + chunk.CacheInMemory(ITransactionFileTracker.NoOp); Assert.IsTrue(chunk.IsCached); diff --git a/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs b/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs index 275c7859ff5..2763431f8e3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs +++ b/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs @@ -1,4 +1,5 @@ using EventStore.Core.Settings; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; @@ -72,7 +73,7 @@ public static TFChunkDbConfig CreateDbConfig( public static TFChunk CreateNewChunk(string fileName, int chunkSize = 4096, bool isScavenged = false) { return TFChunk.CreateNew(fileName, chunkSize, 0, 0, isScavenged: isScavenged, inMem: false, unbuffered: false, - writethrough: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); + writethrough: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, ITransactionFileTracker.NoOp); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs b/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs index 5c7c0104a91..0c9550d45ac 100644 --- 
a/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs @@ -5,6 +5,7 @@ using System.Threading; using System.Threading.Tasks; using EventStore.Core.Exceptions; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -446,7 +447,7 @@ public void when_prelast_chunk_corrupted_throw_hash_validation_exception() { .WriteTo.Sink(sink) .MinimumLevel.Verbose() .CreateLogger()) - using (var db = new TFChunkDb(config, log)) { + using (var db = new TFChunkDb(config, ITransactionFileTracker.NoOp, log)) { byte[] contents = new byte[config.ChunkSize]; for (var i = 0; i < config.ChunkSize; i++) { contents[i] = 0; diff --git a/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs b/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs index 500ee6153ae..89a33e1393e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs @@ -1,6 +1,7 @@ using System; using System.IO; using System.Threading.Tasks; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; @@ -67,7 +68,7 @@ public override Task SetUp() { public void checkpoints_should_be_flushed_only_when_chunks_are_properly_closed(bool chunksClosed) { if (!chunksClosed) { // acquire a reader to prevent the chunk from being properly closed - _db.Manager.GetChunk(0).AcquireReader(); + _db.Manager.GetChunk(0).AcquireReader(ITransactionFileTracker.NoOp); } var writer = new TFChunkWriter(_db); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs 
b/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs index 33a9d21bf32..986b461a9c9 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs @@ -1,4 +1,5 @@ using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -15,7 +16,7 @@ public override void SetUp() { _chunk = TFChunkHelper.CreateNewChunk(Filename, 1000); _chunk.Complete(); _chunk.UnCacheFromMemory(); - _reader = _chunk.AcquireReader(); + _reader = _chunk.AcquireReader(ITransactionFileTracker.NoOp); _chunk.MarkForDeletion(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs b/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs index e21ef99f282..cfc5ef4cec3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs @@ -1,4 +1,5 @@ using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -11,7 +12,7 @@ public class when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlock public override void SetUp() { base.SetUp(); _chunk = TFChunkHelper.CreateNewChunk(Filename, 1000); - var reader = _chunk.AcquireReader(); + var reader = _chunk.AcquireReader(ITransactionFileTracker.NoOp); _chunk.MarkForDeletion(); reader.Release(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs index 5c48d023dfb..44c0c0c68d9 100644 
--- a/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs @@ -1,4 +1,5 @@ using System; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -18,7 +19,8 @@ public override void TestFixtureSetUp() { _testChunk = TFChunk.FromCompletedFile(Filename, true, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - reduceFileCachePressure: false); + reduceFileCachePressure: false, + tracker: ITransactionFileTracker.NoOp); } [TearDown] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs index 3404a706bb5..50cdb9882fd 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs @@ -1,4 +1,5 @@ using EventStore.Core.Exceptions; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -8,7 +9,7 @@ namespace EventStore.Core.Tests.TransactionLog { public class when_opening_tfchunk_from_non_existing_file : SpecificationWithFile { [Test] public void it_should_throw_a_file_not_found_exception() { - Assert.Throws(() => TFChunk.FromCompletedFile(Filename, verifyHash: true, + Assert.Throws(() => TFChunk.FromCompletedFile(Filename, verifyHash: true, tracker: ITransactionFileTracker.NoOp, unbufferedRead: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false)); } } diff --git 
a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs index 7f4bff51e8d..c60878532f6 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs @@ -12,7 +12,7 @@ public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _chunk = TFChunkHelper.CreateNewChunk(Filename, isScavenged: true); _chunk.CompleteScavenge(new PosMap[0]); - _chunk.CacheInMemory(); + _chunk.CacheInMemory(ITransactionFileTracker.NoOp); } [OneTimeTearDown] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs index 659611c21e4..90487f978b5 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs @@ -29,9 +29,9 @@ public override void TestFixtureSetUp() { _result = _chunk.TryAppend(_record); _chunk.Flush(); _chunk.Complete(); - _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, + _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, tracker: ITransactionFileTracker.NoOp, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); - _cachedChunk.CacheInMemory(); + _cachedChunk.CacheInMemory(ITransactionFileTracker.NoOp); } [OneTimeTearDown] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs index c334ea8290a..4459436e39c 100644 --- 
a/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.LogAbstraction; using EventStore.Core.LogV2; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -15,7 +16,7 @@ public when_reading_logical_bytes_bulk_from_a_chunk() { [Test] public void the_file_will_not_be_deleted_until_reader_released() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { chunk.MarkForDeletion(); var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); @@ -29,7 +30,7 @@ public void the_file_will_not_be_deleted_until_reader_released() { [Test] public void a_read_on_new_file_can_be_performed_but_returns_nothing() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsFalse(result.IsEOF); @@ -44,7 +45,7 @@ public void a_read_on_new_file_can_be_performed_but_returns_nothing() { public void a_read_past_end_of_completed_chunk_does_not_include_footer() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300); chunk.Complete(); // chunk has 0 bytes of actual data - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsTrue(result.IsEOF); @@ -60,7 +61,7 @@ public void a_read_past_end_of_completed_chunk_does_not_include_footer() { public void 
a_read_on_scavenged_chunk_does_not_include_map() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("afile"), 200, isScavenged: true); chunk.CompleteScavenge(new[] {new PosMap(0, 0), new PosMap(1, 1)}); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsTrue(result.IsEOF); @@ -82,7 +83,7 @@ public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size() { new byte[2000], null); Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended"); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(3000, buffer); Assert.IsFalse(result.IsEOF); @@ -98,7 +99,7 @@ public void a_read_past_eof_doesnt_return_eof_if_chunk_is_not_yet_completed() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300); var rec = LogRecord.Commit(0, Guid.NewGuid(), 0, 0); Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended"); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsFalse(result.IsEOF, "EOF was returned."); @@ -119,7 +120,7 @@ public void a_read_past_eof_returns_eof_if_chunk_is_completed() { Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended"); chunk.Complete(); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsTrue(result.IsEOF, "EOF was not returned."); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs 
b/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs index 665fd765d11..7f2cb81042a 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs @@ -1,3 +1,4 @@ +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -8,7 +9,7 @@ public class when_reading_physical_bytes_bulk_from_a_chunk : SpecificationWithDi [Test] public void the_file_will_not_be_deleted_until_reader_released() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { chunk.MarkForDeletion(); var buffer = new byte[1024]; var result = reader.ReadNextRawBytes(1024, buffer); @@ -22,7 +23,7 @@ public void the_file_will_not_be_deleted_until_reader_released() { [Test] public void a_read_on_new_file_can_be_performed() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextRawBytes(1024, buffer); Assert.IsFalse(result.IsEOF); @@ -69,7 +70,7 @@ public void a_read_past_end_of_completed_chunk_does_include_header_or_footer() [Test] public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 3000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextRawBytes(3000, buffer); Assert.IsFalse(result.IsEOF); @@ -83,7 +84,7 @@ public void 
if_asked_for_more_than_buffer_size_will_only_read_buffer_size() { [Test] public void a_read_past_eof_returns_eof_and_no_footer() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[8092]; var result = reader.ReadNextRawBytes(8092, buffer); Assert.IsTrue(result.IsEOF); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs index 5519ea79186..db889b6fb0d 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs @@ -29,9 +29,9 @@ public override void TestFixtureSetUp() { _result = _chunk.TryAppend(_record); _chunk.Flush(); _chunk.Complete(); - _uncachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, + _uncachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, tracker: ITransactionFileTracker.NoOp, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); - _uncachedChunk.CacheInMemory(); + _uncachedChunk.CacheInMemory(ITransactionFileTracker.NoOp); _uncachedChunk.UnCacheFromMemory(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs b/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs index a0eb05cb364..85790aec2f2 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs @@ -1,4 +1,5 @@ using System.IO; +using EventStore.Core.TransactionLog; using 
EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -11,7 +12,7 @@ public class when_unlocking_a_tfchunk_that_has_been_marked_for_deletion : Specif public override void SetUp() { base.SetUp(); _chunk = TFChunkHelper.CreateNewChunk(Filename, 1000); - var reader = _chunk.AcquireReader(); + var reader = _chunk.AcquireReader(ITransactionFileTracker.NoOp); _chunk.MarkForDeletion(); reader.Release(); } diff --git a/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs b/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs index 7737bc014c5..10f4fe8af06 100644 --- a/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs +++ b/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs @@ -55,6 +55,9 @@ class FakeFactory : ITransactionFileTrackerFactory { record FakeTracker(string Username) : ITransactionFileTracker { public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { } + + public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { + } } class FakeReadRequest : ClientMessage.ReadRequestMessage { diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index cd18fb822c3..d57293e5691 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -718,7 +718,8 @@ private void EmptyRequestedChunks(TFChunkDb db) { writethrough: false, initialReaderCount: 1, maxReaderCount: 1, - reduceFileCachePressure: false); + reduceFileCachePressure: false, + tracker: ITransactionFileTracker.NoOp); newChunk.CompleteScavenge(null); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 82df116cf69..75fcd23ad18 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ 
b/src/EventStore.Core/ClusterVNode.cs @@ -307,7 +307,7 @@ public ClusterVNode(ClusterVNodeOptions options, metricsConfiguration ??= new(); MetricsBootstrapper.Bootstrap(metricsConfiguration, dbConfig, trackers); - Db = new TFChunkDb(dbConfig); + Db = new TFChunkDb(dbConfig, trackers.TransactionFileTrackers.For(SystemAccounts.System)); TFChunkDbConfig CreateDbConfig( out SystemStatsHelper statsHelper, @@ -1465,6 +1465,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _workersHandler, epochManager, options.Cluster.ClusterSize, options.Cluster.UnsafeAllowSurplusNodes, + trackers.TransactionFileTrackers.For(SystemAccounts.SystemReplicationName), _queueStatsManager); AddTask(leaderReplicationService.Task); _mainBus.Subscribe(leaderReplicationService); diff --git a/src/EventStore.Core/Services/RedactionService.cs b/src/EventStore.Core/Services/RedactionService.cs index e285d089954..16dd1723e2a 100644 --- a/src/EventStore.Core/Services/RedactionService.cs +++ b/src/EventStore.Core/Services/RedactionService.cs @@ -254,7 +254,8 @@ private bool IsValidSwitchChunkRequest(string targetChunkFile, string newChunkFi initialReaderCount: 1, maxReaderCount: 1, optimizeReadSideCache: false, - reduceFileCachePressure: true); + reduceFileCachePressure: true, + tracker: _tfTracker); } catch (HashValidationException) { failReason = SwitchChunkResult.NewChunkHashInvalid; return false; diff --git a/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs b/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs index 05a7ab888de..483cf8b5101 100644 --- a/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs +++ b/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs @@ -16,6 +16,7 @@ using EventStore.Core.Services.Monitoring.Stats; using EventStore.Core.Services.Storage.EpochManager; using EventStore.Core.Services.Transport.Tcp; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using 
EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -62,7 +63,7 @@ public string Name { private readonly IEpochManager _epochManager; private readonly int _clusterSize; private readonly bool _unsafeAllowSurplusNodes; - + private readonly ITransactionFileTracker _tfTracker; private readonly Thread _mainLoopThread; private volatile bool _stop; private readonly QueueStatsCollector _queueStats; @@ -93,6 +94,7 @@ public LeaderReplicationService( IEpochManager epochManager, int clusterSize, bool unsafeAllowSurplusNodes, + ITransactionFileTracker tfTracker, QueueStatsManager queueStatsManager) { Ensure.NotNull(publisher, "publisher"); Ensure.NotEmptyGuid(instanceId, "instanceId"); @@ -108,6 +110,7 @@ public LeaderReplicationService( _epochManager = epochManager; _clusterSize = clusterSize; _unsafeAllowSurplusNodes = unsafeAllowSurplusNodes; + _tfTracker = tfTracker; _queueStats = queueStatsManager.CreateQueueStatsCollector("Leader Replication Service"); _lastRolesAssignmentTimestamp = _stopwatch.Elapsed; @@ -357,7 +360,7 @@ private long SetSubscriptionPosition(ReplicaSubscription sub, Debug.Assert(chunk != null, string.Format( "Chunk for LogPosition {0} (0x{0:X}) is null in LeaderReplicationService! 
Replica: [{1},C:{2},S:{3}]", logPosition, sub.ReplicaEndPoint, sub.ConnectionId, sub.SubscriptionId)); - var bulkReader = chunk.AcquireReader(); + var bulkReader = chunk.AcquireReader(_tfTracker); if (chunk.ChunkHeader.IsScavenged && (chunkId == Guid.Empty || chunkId != chunk.ChunkHeader.ChunkId)) { var chunkStartPos = chunk.ChunkHeader.ChunkStartPosition; if (verbose) { diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 26a7fdb97bc..5882e28464a 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -18,6 +18,7 @@ public class SystemAccounts { public static readonly string SystemIndexCommitterName = "system-index-committer"; public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; public static readonly string SystemRedactionName = "system-redaction"; + public static readonly string SystemReplicationName = "system-replication"; public static readonly string SystemScavengeName = "system-scavenge"; public static readonly string SystemSubscriptionsName = "system-subscriptions"; public static readonly string SystemTelemetryName = "system-telemetry"; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index e92e408d63f..3e664355a47 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -165,11 +165,11 @@ private TFChunk(string filename, } public static TFChunk FromCompletedFile(string filename, bool verifyHash, bool unbufferedRead, - int initialReaderCount, int maxReaderCount, bool optimizeReadSideCache = false, bool reduceFileCachePressure = false) { + int initialReaderCount, int maxReaderCount, ITransactionFileTracker tracker, bool optimizeReadSideCache = false, bool 
reduceFileCachePressure = false) { var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, TFConsts.MidpointsDepth, false, unbufferedRead, false, reduceFileCachePressure); try { - chunk.InitCompleted(verifyHash, optimizeReadSideCache); + chunk.InitCompleted(verifyHash, optimizeReadSideCache, tracker); } catch { chunk.Dispose(); throw; @@ -179,7 +179,7 @@ public static TFChunk FromCompletedFile(string filename, bool verifyHash, bool u } public static TFChunk FromOngoingFile(string filename, int writePosition, bool checkSize, bool unbuffered, - bool writethrough, int initialReaderCount, int maxReaderCount, bool reduceFileCachePressure) { + bool writethrough, int initialReaderCount, int maxReaderCount, bool reduceFileCachePressure, ITransactionFileTracker tracker) { var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, @@ -188,7 +188,7 @@ public static TFChunk FromOngoingFile(string filename, int writePosition, bool c unbuffered, writethrough, reduceFileCachePressure); try { - chunk.InitOngoing(writePosition, checkSize); + chunk.InitOngoing(writePosition, checkSize, tracker); } catch { chunk.Dispose(); throw; @@ -207,13 +207,13 @@ public static TFChunk CreateNew(string filename, bool writethrough, int initialReaderCount, int maxReaderCount, - bool reduceFileCachePressure) { - + bool reduceFileCachePressure, + ITransactionFileTracker tracker) { var size = GetAlignedSize(chunkSize + ChunkHeader.Size + ChunkFooter.Size); var chunkHeader = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkStartNumber, chunkEndNumber, isScavenged, Guid.NewGuid()); return CreateWithHeader(filename, chunkHeader, size, inMem, unbuffered, writethrough, initialReaderCount, maxReaderCount, - reduceFileCachePressure); + reduceFileCachePressure, tracker); } public static TFChunk CreateWithHeader(string filename, @@ -224,7 +224,8 @@ public static TFChunk CreateWithHeader(string filename, bool writethrough, int initialReaderCount, int maxReaderCount, - bool 
reduceFileCachePressure) { + bool reduceFileCachePressure, + ITransactionFileTracker tracker) { var chunk = new TFChunk(filename, initialReaderCount, maxReaderCount, @@ -234,7 +235,7 @@ public static TFChunk CreateWithHeader(string filename, writethrough, reduceFileCachePressure); try { - chunk.InitNew(header, fileSize); + chunk.InitNew(header, fileSize, tracker); } catch { chunk.Dispose(); throw; @@ -243,7 +244,7 @@ public static TFChunk CreateWithHeader(string filename, return chunk; } - private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) { + private void InitCompleted(bool verifyHash, bool optimizeReadSideCache, ITransactionFileTracker tracker) { var fileInfo = new FileInfo(_filename); if (!fileInfo.Exists) throw new CorruptDatabaseException(new ChunkNotFoundException(_filename)); @@ -298,10 +299,10 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) { _readSide.RequestCaching(); if (verifyHash) - VerifyFileHash(); + VerifyFileHash(tracker); } - private void InitNew(ChunkHeader chunkHeader, int fileSize) { + private void InitNew(ChunkHeader chunkHeader, int fileSize, ITransactionFileTracker tracker) { Ensure.NotNull(chunkHeader, "chunkHeader"); Ensure.Positive(fileSize, "fileSize"); @@ -325,11 +326,11 @@ private void InitNew(ChunkHeader chunkHeader, int fileSize) { // Always cache the active chunk // If the chunk is scavenged we will definitely mark it readonly before we are done writing to it. 
if (!chunkHeader.IsScavenged) { - CacheInMemory(); + CacheInMemory(tracker); } } - private void InitOngoing(int writePosition, bool checkSize) { + private void InitOngoing(int writePosition, bool checkSize, ITransactionFileTracker tracker) { Ensure.Nonnegative(writePosition, "writePosition"); var fileInfo = new FileInfo(_filename); if (!fileInfo.Exists) @@ -363,7 +364,7 @@ private void InitOngoing(int writePosition, bool checkSize) { _readSide = new TFChunkReadSideUnscavenged(this); // Always cache the active chunk - CacheInMemory(); + CacheInMemory(tracker); } // If one file stream writes to a file, and another file stream happens to have that part of @@ -537,12 +538,12 @@ private void SetAttributes(string filename, bool isReadOnly) { }); } - public void VerifyFileHash() { + public void VerifyFileHash(ITransactionFileTracker tracker) { if (!IsReadOnly) throw new InvalidOperationException("You can't verify hash of not-completed TFChunk."); Log.Debug("Verifying hash for TFChunk '{chunk}'...", _filename); - using (var reader = AcquireReader()) { + using (var reader = AcquireReader(tracker)) { reader.Stream.Seek(0, SeekOrigin.Begin); var stream = reader.Stream; var footer = _chunkFooter; @@ -627,7 +628,7 @@ public long GetActualRawPosition(long logicalPosition, ITransactionFileTracker t return GetRawPosition(actualPosition); } - public void CacheInMemory() { + public void CacheInMemory(ITransactionFileTracker tracker) { lock (_cachedDataLock) { if (_inMem) return; @@ -643,7 +644,7 @@ public void CacheInMemory() { // we won the right to cache var sw = Stopwatch.StartNew(); try { - BuildCacheArray(); + BuildCacheArray(tracker); } catch (OutOfMemoryException) { Log.Error("CACHING FAILED due to OutOfMemory exception in TFChunk {chunk}.", this); return; @@ -687,8 +688,8 @@ public void CacheInMemory() { } } - private void BuildCacheArray() { - var workItem = AcquireFileReader(); + private void BuildCacheArray(ITransactionFileTracker tracker) { + var workItem = 
AcquireFileReader(tracker); try { if (workItem.IsMemory) throw new InvalidOperationException( @@ -1185,14 +1186,14 @@ private void ReturnReaderWorkItem(ReaderWorkItem item) { } } - public TFChunkBulkReader AcquireReader() { - if (TryAcquireBulkMemReader(out var reader)) + public TFChunkBulkReader AcquireReader(ITransactionFileTracker tracker) { + if (TryAcquireBulkMemReader(tracker, out var reader)) return reader; - return AcquireFileReader(); + return AcquireFileReader(tracker); } - private TFChunkBulkReader AcquireFileReader() { + private TFChunkBulkReader AcquireFileReader(ITransactionFileTracker tracker) { Interlocked.Increment(ref _fileStreamCount); if (_selfdestructin54321) { if (Interlocked.Decrement(ref _fileStreamCount) == 0) { @@ -1204,7 +1205,7 @@ private TFChunkBulkReader AcquireFileReader() { // if we get here, then we reserved TFChunk for sure so no one should dispose of chunk file // until client returns dedicated reader - return new TFChunkBulkReader(this, GetSequentialReaderFileStream(), isMemory: false); + return new TFChunkBulkReader(this, GetSequentialReaderFileStream(), isMemory: false, tracker); } private Stream GetSequentialReaderFileStream() { @@ -1218,7 +1219,7 @@ private Stream GetSequentialReaderFileStream() { // (a) doesn't block if a file reader would be acceptable instead // (we might be in the middle of caching which could take a while) // (b) _does_ throw if we can't get a memstream and a filestream is not acceptable - private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { + private bool TryAcquireBulkMemReader(ITransactionFileTracker tracker, out TFChunkBulkReader reader) { reader = null; if (IsReadOnly) { @@ -1229,7 +1230,7 @@ private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { return false; try { - return TryCreateBulkMemReader(out reader); + return TryCreateBulkMemReader(tracker, out reader); } finally { Monitor.Exit(_cachedDataLock); } @@ -1237,7 +1238,7 @@ private bool TryAcquireBulkMemReader(out 
TFChunkBulkReader reader) { // chunk is not readonly so it should be cached and let us create a mem reader // (but might become readonly at any moment!) - if (TryCreateBulkMemReader(out reader)) + if (TryCreateBulkMemReader(tracker, out reader)) return true; // we couldn't get a memreader, maybe we just became readonly and got uncached. @@ -1252,7 +1253,7 @@ private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { } // creates a bulk reader over a memstream as long as we are cached - private bool TryCreateBulkMemReader(out TFChunkBulkReader reader) { + private bool TryCreateBulkMemReader(ITransactionFileTracker tracker, out TFChunkBulkReader reader) { lock (_cachedDataLock) { if (_cacheStatus != CacheStatus.Cached) { reader = null; @@ -1264,7 +1265,7 @@ private bool TryCreateBulkMemReader(out TFChunkBulkReader reader) { Interlocked.Increment(ref _memStreamCount); var stream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength); - reader = new TFChunkBulkReader(this, stream, isMemory: true); //qq tracking the bytes here would be nice, under a 'system-replication' + reader = new TFChunkBulkReader(this, stream, isMemory: true, tracker); return true; } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs index 8fdf7b9a55c..63c5e74a121 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs @@ -16,15 +16,18 @@ internal Stream Stream { private readonly TFChunk.TFChunk _chunk; private readonly Stream _stream; + private readonly ITransactionFileTracker _tfTracker; private bool _disposed; public bool IsMemory { get; init; } - internal TFChunkBulkReader(TFChunk.TFChunk chunk, Stream streamToUse, bool isMemory) { + internal TFChunkBulkReader(TFChunk.TFChunk chunk, Stream streamToUse, bool isMemory, + ITransactionFileTracker tfTracker) { Ensure.NotNull(chunk, "chunk"); 
Ensure.NotNull(streamToUse, "stream"); _chunk = chunk; _stream = streamToUse; IsMemory = isMemory; + _tfTracker = tfTracker; } ~TFChunkBulkReader() { @@ -62,6 +65,11 @@ public BulkReadResult ReadNextRawBytes(int count, byte[] buffer) { var oldPos = (int)_stream.Position; int bytesRead = _stream.Read(buffer, 0, count); + + _tfTracker.OnRead(bytesRead, Chunk.IsCached + ? ITransactionFileTracker.Source.ChunkCache + : ITransactionFileTracker.Source.Disk); + return new BulkReadResult(oldPos, bytesRead, isEof: _stream.Length == _stream.Position); } @@ -80,6 +88,11 @@ public BulkReadResult ReadNextDataBytes(int count, byte[] buffer) { Debug.Assert(toRead >= 0); _stream.Position = _stream.Position; // flush read buffer int bytesRead = _stream.Read(buffer, 0, toRead); + + _tfTracker.OnRead(bytesRead, Chunk.IsCached + ? ITransactionFileTracker.Source.ChunkCache + : ITransactionFileTracker.Source.Disk); + return new BulkReadResult(oldPos, bytesRead, isEof: _chunk.IsReadOnly && oldPos + bytesRead == _chunk.PhysicalDataSize); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs index 1b6d0d1e160..9728b0c0054 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs @@ -10,16 +10,18 @@ namespace EventStore.Core.TransactionLog.Chunks { public class TFChunkDb : IDisposable { public readonly TFChunkDbConfig Config; + private readonly ITransactionFileTracker _tracker; public readonly TFChunkManager Manager; private readonly ILogger _log; private int _closed; - public TFChunkDb(TFChunkDbConfig config, ILogger log = null) { + public TFChunkDb(TFChunkDbConfig config, ITransactionFileTracker tracker = null, ILogger log = null) { Ensure.NotNull(config, "config"); Config = config; - Manager = new TFChunkManager(Config); + _tracker = tracker ?? ITransactionFileTracker.NoOp; + Manager = new TFChunkManager(Config, _tracker); _log = log ?? 
Serilog.Log.ForContext(); } @@ -75,14 +77,16 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, optimizeReadSideCache: Config.OptimizeReadSideCache, - reduceFileCachePressure: Config.ReduceFileCachePressure); + reduceFileCachePressure: Config.ReduceFileCachePressure, + tracker: _tracker); else { chunk = TFChunk.TFChunk.FromOngoingFile(chunkInfo.ChunkFileName, Config.ChunkSize, checkSize: false, unbuffered: Config.Unbuffered, writethrough: Config.WriteThrough, initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, - reduceFileCachePressure: Config.ReduceFileCachePressure); + reduceFileCachePressure: Config.ReduceFileCachePressure, + tracker: _tracker); // chunk is full with data, we should complete it right here if (!readOnly) chunk.Complete(); @@ -93,7 +97,8 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, optimizeReadSideCache: Config.OptimizeReadSideCache, - reduceFileCachePressure: Config.ReduceFileCachePressure); + reduceFileCachePressure: Config.ReduceFileCachePressure, + tracker: _tracker); } // This call is theadsafe. 
@@ -121,7 +126,8 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, optimizeReadSideCache: Config.OptimizeReadSideCache, - reduceFileCachePressure: Config.ReduceFileCachePressure); + reduceFileCachePressure: Config.ReduceFileCachePressure, + tracker: _tracker); if (lastChunk.ChunkFooter.LogicalDataSize != chunkLocalPos) { lastChunk.Dispose(); throw new CorruptDatabaseException(new BadChunkInDatabaseException( @@ -146,7 +152,8 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) unbuffered: Config.Unbuffered, writethrough: Config.WriteThrough, initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, - reduceFileCachePressure: Config.ReduceFileCachePressure); + reduceFileCachePressure: Config.ReduceFileCachePressure, + tracker: _tracker); Manager.AddChunk(lastChunk); } } @@ -172,7 +179,7 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) for (int chunkNum = lastBgChunkNum; chunkNum >= 0;) { var chunk = Manager.GetChunk(chunkNum); try { - chunk.VerifyFileHash(); + chunk.VerifyFileHash(_tracker); } catch (FileBeingDeletedException exc) { _log.Debug( "{exceptionType} exception was thrown while doing background validation of chunk {chunk}.", diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs index 195de89a643..4dc850993b1 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs @@ -20,6 +20,7 @@ public int ChunksCount { private readonly TFChunkDbConfig _config; private readonly TFChunk.TFChunk[] _chunks = new TFChunk.TFChunk[MaxChunksCount]; + private readonly ITransactionFileTracker _tracker; private volatile int _chunksCount; private volatile bool _cachingEnabled; @@ -27,9 +28,10 @@ public int 
ChunksCount { private int _backgroundPassesRemaining; private int _backgroundRunning; - public TFChunkManager(TFChunkDbConfig config) { + public TFChunkManager(TFChunkDbConfig config, ITransactionFileTracker tracker) { Ensure.NotNull(config, "config"); _config = config; + _tracker = tracker; } public void EnableCaching() { @@ -86,7 +88,7 @@ private void CacheUncacheReadOnlyChunks() { for (int chunkNum = lastChunkToCache; chunkNum < _chunksCount;) { var chunk = _chunks[chunkNum]; if (chunk.IsReadOnly) - chunk.CacheInMemory(); + chunk.CacheInMemory(_tracker); chunkNum = chunk.ChunkHeader.ChunkEndNumber + 1; } } @@ -101,7 +103,8 @@ public TFChunk.TFChunk CreateTempChunk(ChunkHeader chunkHeader, int fileSize) { _config.WriteThrough, _config.InitialReaderCount, _config.MaxReaderCount, - _config.ReduceFileCachePressure); + _config.ReduceFileCachePressure, + _tracker); } public TFChunk.TFChunk AddNewChunk() { @@ -118,7 +121,8 @@ public TFChunk.TFChunk AddNewChunk() { writethrough: _config.WriteThrough, initialReaderCount: _config.InitialReaderCount, maxReaderCount: _config.MaxReaderCount, - reduceFileCachePressure: _config.ReduceFileCachePressure); + reduceFileCachePressure: _config.ReduceFileCachePressure, + _tracker); AddChunk(chunk); return chunk; } @@ -143,7 +147,8 @@ public TFChunk.TFChunk AddNewChunk(ChunkHeader chunkHeader, int fileSize) { writethrough: _config.WriteThrough, initialReaderCount: _config.InitialReaderCount, maxReaderCount: _config.MaxReaderCount, - reduceFileCachePressure: _config.ReduceFileCachePressure); + reduceFileCachePressure: _config.ReduceFileCachePressure, + _tracker); AddChunk(chunk); return chunk; } @@ -200,7 +205,7 @@ public TFChunk.TFChunk SwitchChunk(TFChunk.TFChunk chunk, bool verifyHash, } newChunk = TFChunk.TFChunk.FromCompletedFile(newFileName, verifyHash, _config.Unbuffered, - _config.InitialReaderCount, _config.MaxReaderCount, _config.OptimizeReadSideCache, _config.ReduceFileCachePressure ); + _config.InitialReaderCount, 
_config.MaxReaderCount, _tracker, _config.OptimizeReadSideCache, _config.ReduceFileCachePressure ); } lock (_chunksLocker) { diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index c8edc515337..7ea3a5b1db2 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -201,7 +201,8 @@ private void ScavengeChunk(bool alwaysKeepScavenged, TFChunk.TFChunk oldChunk, writethrough: _db.Config.WriteThrough, initialReaderCount: _db.Config.InitialReaderCount, maxReaderCount: _db.Config.MaxReaderCount, - reduceFileCachePressure: _db.Config.ReduceFileCachePressure); + reduceFileCachePressure: _db.Config.ReduceFileCachePressure, + _tfTracker); } catch (IOException exc) { _logger.Error(exc, "IOException during creating new chunk for scavenging purposes. Stopping scavenging process..."); @@ -440,7 +441,8 @@ private static bool MergeChunks( writethrough: db.Config.WriteThrough, initialReaderCount: db.Config.InitialReaderCount, maxReaderCount: db.Config.MaxReaderCount, - reduceFileCachePressure: db.Config.ReduceFileCachePressure); + reduceFileCachePressure: db.Config.ReduceFileCachePressure, + tracker); } catch (IOException exc) { logger.Error(exc, "IOException during creating new chunk for scavenging merge purposes. 
Stopping scavenging merge process..."); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 01b79c19542..3d50af87bdc 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -31,7 +31,12 @@ public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { return; var (bytes, events) = _subMetrics[(int)source]; - bytes.Add(prepare.Data.Length + prepare.Metadata.Length); + bytes.Add(prepare.Data.Length + prepare.Metadata.Length); // approximate events.Add(1); } + + public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { + var (bytes, _) = _subMetrics[(int)source]; + bytes.Add(bytesRead); + } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs index c676e9cc636..fdcee139ab7 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs @@ -6,6 +6,7 @@ namespace EventStore.Core.TransactionLog; public interface ITransactionFileTracker { void OnRead(ILogRecord record, Source source); + void OnRead(int bytesRead, Source source); enum Source { Unknown, @@ -19,6 +20,6 @@ enum Source { } file class NoOp : ITransactionFileTracker { - public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { - } + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { } + public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { } } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs index 7440aa15088..cea3a52665a 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs 
+++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs @@ -22,7 +22,7 @@ public ChunkManagerForExecutor(ILogger logger, TFChunkManager manager, TFChunkDb public IChunkWriterForExecutor CreateChunkWriter( IChunkReaderForExecutor sourceChunk) { - return new ChunkWriterForExecutor(_logger, this, _dbConfig, sourceChunk); + return new ChunkWriterForExecutor(_logger, this, _dbConfig, sourceChunk, _tracker); } public IChunkReaderForExecutor GetChunkReaderFor(long position) { diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs index 380a954d7de..665969576c6 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs @@ -20,7 +20,8 @@ public ChunkWriterForExecutor( ILogger logger, ChunkManagerForExecutor manager, TFChunkDbConfig dbConfig, - IChunkReaderForExecutor sourceChunk) { + IChunkReaderForExecutor sourceChunk, + ITransactionFileTracker tracker) { _logger = logger; _manager = manager; @@ -44,7 +45,8 @@ public ChunkWriterForExecutor( writethrough: dbConfig.WriteThrough, initialReaderCount: dbConfig.InitialReaderCount, maxReaderCount: dbConfig.MaxReaderCount, - reduceFileCachePressure: dbConfig.ReduceFileCachePressure); + reduceFileCachePressure: dbConfig.ReduceFileCachePressure, + tracker); } public string FileName { get; } From 3927a1c5b8ed46044ed479deb1e530b4ce080d37 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Tue, 26 Nov 2024 07:33:46 +0000 Subject: [PATCH 32/38] loose ends --- .../Chunks/TFChunkTrackerTests.cs | 40 +++++++++++++++---- .../Chunks/TFChunk/ReaderWorkItem.cs | 6 +-- .../TransactionLog/Chunks/TFChunk/TFChunk.cs | 2 +- .../Chunks/TransactionFileTracker.cs | 25 +++++++----- .../Chunks/TransactionFileTrackerFactory.cs | 6 +-- 5 files changed, 55 insertions(+), 24 
deletions(-) diff --git a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs index abfd7cba982..1f463d55620 100644 --- a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs @@ -37,7 +37,7 @@ public void can_observe_prepare_log() { data: new byte[5], meta: new byte[5]); - _sut.OnRead(prepare, source: ITransactionFileTracker.Source.Unknown); //qqqq update these tests + _sut.OnRead(prepare, source: ITransactionFileTracker.Source.Disk); _listener.Observe(); AssertEventsRead(1); @@ -47,7 +47,7 @@ public void can_observe_prepare_log() { [Fact] public void disregard_system_log() { var system = CreateSystemRecord(); - _sut.OnRead(system, source: ITransactionFileTracker.Source.Unknown); + _sut.OnRead(system, source: ITransactionFileTracker.Source.Disk); _listener.Observe(); AssertEventsRead(0); @@ -57,7 +57,7 @@ public void disregard_system_log() { [Fact] public void disregard_commit_log() { var system = CreateCommit(); - _sut.OnRead(system, source: ITransactionFileTracker.Source.Unknown); + _sut.OnRead(system, source: ITransactionFileTracker.Source.Disk); _listener.Observe(); AssertEventsRead(0); @@ -79,15 +79,41 @@ private void AssertMeasurements(string instrumentName, long? 
expectedValue) { Assert.Collection( actual, m => { + AssertTags(m.Tags, "unknown"); + Assert.Equal(0, m.Value); + }, + m => { + AssertTags(m.Tags, "archive"); + Assert.Equal(0, m.Value); + }, + m => { + AssertTags(m.Tags, "chunk-cache"); + Assert.Equal(0, m.Value); + }, + m => { + AssertTags(m.Tags, "disk"); Assert.Equal(expectedValue, m.Value); - Assert.Collection(m.Tags.ToArray(), t => { - Assert.Equal("activity", t.Key); - Assert.Equal("read", t.Value); - }); }); } } + private void AssertTags(KeyValuePair[] tags, string source) { + Assert.Collection( + tags.ToArray(), + t => { + Assert.Equal("activity", t.Key); + Assert.Equal("read", t.Value); + }, + t => { + Assert.Equal("source", t.Key); + Assert.Equal(source, t.Value); + }, + t => { + Assert.Equal("user", t.Key); + Assert.Equal("alice", t.Value); + }); + + } private static PrepareLogRecord CreatePrepare(byte[] data, byte[] meta) { return new PrepareLogRecord(42, Guid.NewGuid(), Guid.NewGuid(), 42, 42, "tests", null, 42, DateTime.Now, PrepareFlags.Data, "type-test", null, data, meta); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs index bebc0881cbe..f22ab832918 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs @@ -3,7 +3,7 @@ using System.IO; namespace EventStore.Core.TransactionLog.Chunks.TFChunk { - // ReaderWorkItems are checked out of a pool and used by one thread at a time + // ReaderWorkItems are always checked out of a pool and used by one thread at a time internal class ReaderWorkItem { public readonly Stream Stream; public readonly BinaryReader Reader; @@ -17,13 +17,11 @@ public ReaderWorkItem(Stream stream, BinaryReader reader, bool isMemory) { public ITransactionFileTracker Tracker { get; private set; } = ITransactionFileTracker.NoOp; - //qq is this always called? 
public void OnCheckedOut(ITransactionFileTracker tracker) { Tracker = tracker; } - //qq rename, this needs to be called before being returned - public void OnReturned() { + public void OnReturning() { Tracker = ITransactionFileTracker.NoOp; } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 3e664355a47..6858303d80b 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -1146,7 +1146,7 @@ private ReaderWorkItem GetReaderWorkItemImpl() { } private void ReturnReaderWorkItem(ReaderWorkItem item) { - item.OnReturned(); + item.OnReturning(); if (item.IsMemory) { // we avoid taking the _cachedDataLock here every time because we would be // contending with other reader threads also returning readerworkitems. diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 3d50af87bdc..6de8220fa52 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -7,18 +7,25 @@ namespace EventStore.Core.TransactionLog.Chunks; public class TFChunkTracker : ITransactionFileTracker { - private readonly (CounterSubMetric, CounterSubMetric)[] _subMetrics; + private readonly (CounterSubMetric Events, CounterSubMetric Bytes)[] _subMetrics; public TFChunkTracker(CounterMetric eventMetric, CounterMetric byteMetric, string user) { _subMetrics = new (CounterSubMetric, CounterSubMetric)[(int)(ITransactionFileTracker.Source.EnumLength)]; for (var i = 0; i < _subMetrics.Length; i++) { - var source = $"{(ITransactionFileTracker.Source)i}"; + var sourceName = NameOf((ITransactionFileTracker.Source)i); _subMetrics[i] = ( - CreateSubMetric(eventMetric, source, user), - CreateSubMetric(byteMetric, source, user)); + Events: 
CreateSubMetric(eventMetric, sourceName, user), + Bytes: CreateSubMetric(byteMetric, sourceName, user)); } } + static string NameOf(ITransactionFileTracker.Source source) => source switch { + ITransactionFileTracker.Source.Archive => "archive", + ITransactionFileTracker.Source.ChunkCache => "chunk-cache", + ITransactionFileTracker.Source.Disk => "disk", + _ => "unknown", + }; + static CounterSubMetric CreateSubMetric(CounterMetric metric, string source, string user) { var readTag = new KeyValuePair("activity", "read"); var sourceTag = new KeyValuePair("source", source); @@ -30,13 +37,13 @@ public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { if (record is not PrepareLogRecord prepare) return; - var (bytes, events) = _subMetrics[(int)source]; - bytes.Add(prepare.Data.Length + prepare.Metadata.Length); // approximate - events.Add(1); + var subMetrics = _subMetrics[(int)source]; + subMetrics.Bytes.Add(prepare.Data.Length + prepare.Metadata.Length); // approximate + subMetrics.Events.Add(1); } public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { - var (bytes, _) = _subMetrics[(int)source]; - bytes.Add(bytesRead); + var subMetrics = _subMetrics[(int)source]; + subMetrics.Bytes.Add(bytesRead); } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs index eb431564f40..69a91cf622b 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs @@ -16,11 +16,11 @@ public TransactionFileTrackerFactory(CounterMetric eventMetric, CounterMetric by } public ITransactionFileTracker GetOrAdd(string user) { - return _trackersByUser.GetOrAdd(user, Create); + return _trackersByUser.GetOrAdd(user, Create, (_eventMetric, _byteMetric)); } - private ITransactionFileTracker Create(string user) { - var tracker = new 
TFChunkTracker(_eventMetric, _byteMetric, user); + private static ITransactionFileTracker Create(string user, (CounterMetric EventMetric, CounterMetric ByteMetric) metrics) { + var tracker = new TFChunkTracker(metrics.EventMetric, metrics.ByteMetric, user); return tracker; } From 9ac05790bc17d62dafc349b5ce669f53cc9f3697 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Tue, 26 Nov 2024 07:45:42 +0000 Subject: [PATCH 33/38] rename Disk source to File, since when reading from File we might still end up in the OS page cache and not the Disk --- .../TransactionLog/Chunks/TFChunkTrackerTests.cs | 8 ++++---- .../TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs | 4 ++-- .../TransactionLog/Chunks/TFChunkBulkReader.cs | 4 ++-- .../TransactionLog/Chunks/TransactionFileTracker.cs | 2 +- .../TransactionLog/ITransactionFileTracker.cs | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs index 1f463d55620..8c6140d1509 100644 --- a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs @@ -37,7 +37,7 @@ public void can_observe_prepare_log() { data: new byte[5], meta: new byte[5]); - _sut.OnRead(prepare, source: ITransactionFileTracker.Source.Disk); + _sut.OnRead(prepare, source: ITransactionFileTracker.Source.File); _listener.Observe(); AssertEventsRead(1); @@ -47,7 +47,7 @@ public void can_observe_prepare_log() { [Fact] public void disregard_system_log() { var system = CreateSystemRecord(); - _sut.OnRead(system, source: ITransactionFileTracker.Source.Disk); + _sut.OnRead(system, source: ITransactionFileTracker.Source.File); _listener.Observe(); AssertEventsRead(0); @@ -57,7 +57,7 @@ public void disregard_system_log() { [Fact] public void disregard_commit_log() { var system = CreateCommit(); - 
_sut.OnRead(system, source: ITransactionFileTracker.Source.Disk); + _sut.OnRead(system, source: ITransactionFileTracker.Source.File); _listener.Observe(); AssertEventsRead(0); @@ -91,7 +91,7 @@ private void AssertMeasurements(string instrumentName, long? expectedValue) { Assert.Equal(0, m.Value); }, m => { - AssertTags(m.Tags, "disk"); + AssertTags(m.Tags, "file"); Assert.Equal(expectedValue, m.Value); }); } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index 6bbebf13d12..baa1cb662dd 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -625,7 +625,7 @@ record = null; record = LogRecord.ReadFrom(workItem.Reader, length); workItem.Tracker.OnRead(record, Chunk.IsCached ? ITransactionFileTracker.Source.ChunkCache : - ITransactionFileTracker.Source.Disk); + ITransactionFileTracker.Source.File); int suffixLength = workItem.Reader.ReadInt32(); ValidateSuffixLength(length, suffixLength, actualPosition); @@ -708,7 +708,7 @@ record = null; record = LogRecord.ReadFrom(workItem.Reader, length); workItem.Tracker.OnRead(record, Chunk.IsCached ? ITransactionFileTracker.Source.ChunkCache : - ITransactionFileTracker.Source.Disk); + ITransactionFileTracker.Source.File); return true; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs index 63c5e74a121..58128bdafc3 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs @@ -68,7 +68,7 @@ public BulkReadResult ReadNextRawBytes(int count, byte[] buffer) { _tfTracker.OnRead(bytesRead, Chunk.IsCached ? 
ITransactionFileTracker.Source.ChunkCache - : ITransactionFileTracker.Source.Disk); + : ITransactionFileTracker.Source.File); return new BulkReadResult(oldPos, bytesRead, isEof: _stream.Length == _stream.Position); } @@ -91,7 +91,7 @@ public BulkReadResult ReadNextDataBytes(int count, byte[] buffer) { _tfTracker.OnRead(bytesRead, Chunk.IsCached ? ITransactionFileTracker.Source.ChunkCache - : ITransactionFileTracker.Source.Disk); + : ITransactionFileTracker.Source.File); return new BulkReadResult(oldPos, bytesRead, diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 6de8220fa52..173294a97ae 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -22,7 +22,7 @@ public TFChunkTracker(CounterMetric eventMetric, CounterMetric byteMetric, strin static string NameOf(ITransactionFileTracker.Source source) => source switch { ITransactionFileTracker.Source.Archive => "archive", ITransactionFileTracker.Source.ChunkCache => "chunk-cache", - ITransactionFileTracker.Source.Disk => "disk", + ITransactionFileTracker.Source.File => "file", _ => "unknown", }; diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs index fdcee139ab7..fa2cf00ef35 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs @@ -12,7 +12,7 @@ enum Source { Unknown, Archive, ChunkCache, - Disk, + File, EnumLength, }; From 9ab9fc5a35a5997b786c4fc2bae37634d1814f72 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Tue, 26 Nov 2024 08:28:03 +0000 Subject: [PATCH 34/38] minor adjustment --- .../TransactionLog/Chunks/TFChunkBulkReader.cs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs index 58128bdafc3..cc49d6be5dd 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs @@ -66,7 +66,7 @@ public BulkReadResult ReadNextRawBytes(int count, byte[] buffer) { var oldPos = (int)_stream.Position; int bytesRead = _stream.Read(buffer, 0, count); - _tfTracker.OnRead(bytesRead, Chunk.IsCached + _tfTracker.OnRead(bytesRead, IsMemory ? ITransactionFileTracker.Source.ChunkCache : ITransactionFileTracker.Source.File); @@ -89,7 +89,7 @@ public BulkReadResult ReadNextDataBytes(int count, byte[] buffer) { _stream.Position = _stream.Position; // flush read buffer int bytesRead = _stream.Read(buffer, 0, toRead); - _tfTracker.OnRead(bytesRead, Chunk.IsCached + _tfTracker.OnRead(bytesRead, IsMemory ? ITransactionFileTracker.Source.ChunkCache : ITransactionFileTracker.Source.File); From 544908c2a9a933e8162a63a95a06446bac8c80af Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Wed, 27 Nov 2024 07:05:30 +0000 Subject: [PATCH 35/38] reduce granularity --- src/EventStore.Core/Index/TableIndex.cs | 4 ++-- .../Services/UserManagement/SystemAccounts.cs | 11 +++++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/EventStore.Core/Index/TableIndex.cs b/src/EventStore.Core/Index/TableIndex.cs index b214a4f85eb..47bbd692c9d 100644 --- a/src/EventStore.Core/Index/TableIndex.cs +++ b/src/EventStore.Core/Index/TableIndex.cs @@ -311,7 +311,7 @@ private void ReadOffQueue() { Log.Debug("Performing manual index merge."); _isManualMergePending = false; - using (var reader = _tfReaderFactory(SystemAccounts.SystemIndexMergeName)) { + using (var reader = _tfReaderFactory(SystemAccounts.SystemName)) { var manualMergeResult = _indexMap.TryManualMerge( (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), 
@@ -362,7 +362,7 @@ private void ReadOffQueue() { _indexMap.SaveToFile(indexmapFile); if (addResult.CanMergeAny) { - using (var reader = _tfReaderFactory(SystemAccounts.SystemIndexMergeName)) { + using (var reader = _tfReaderFactory(SystemAccounts.SystemName)) { MergeResult mergeResult; do { mergeResult = _indexMap.TryMergeOneLevel( diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 5882e28464a..98c8f5e08a9 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -12,16 +12,15 @@ public class SystemAccounts { public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); public static readonly string SystemChaserName = "system-chaser"; - public static readonly string SystemEpochManagerName = "system-epoch-manager"; + public static readonly string SystemEpochManagerName = "system"; public static readonly string SystemName = "system"; - public static readonly string SystemIndexMergeName = "system-index-merge"; - public static readonly string SystemIndexCommitterName = "system-index-committer"; + public static readonly string SystemIndexCommitterName = "system"; public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; - public static readonly string SystemRedactionName = "system-redaction"; + public static readonly string SystemRedactionName = "system"; public static readonly string SystemReplicationName = "system-replication"; public static readonly string SystemScavengeName = "system-scavenge"; public static readonly string SystemSubscriptionsName = "system-subscriptions"; - public static readonly string SystemTelemetryName = "system-telemetry"; - public static readonly string SystemWriterName = "system-writer"; + public static readonly string SystemTelemetryName 
= "system"; + public static readonly string SystemWriterName = "system"; } } From 8e9f52d609ad8b9721a75f30f435e01bf029590b Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Wed, 27 Nov 2024 07:26:56 +0000 Subject: [PATCH 36/38] disable archive source for 23.10 --- .../TransactionLog/Chunks/TransactionFileTracker.cs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 173294a97ae..42f209c9db7 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -20,7 +20,6 @@ public TFChunkTracker(CounterMetric eventMetric, CounterMetric byteMetric, strin } static string NameOf(ITransactionFileTracker.Source source) => source switch { - ITransactionFileTracker.Source.Archive => "archive", ITransactionFileTracker.Source.ChunkCache => "chunk-cache", ITransactionFileTracker.Source.File => "file", _ => "unknown", From 1b7d7e2a1ec1f591e9afee2d9d29e8a84c11bb97 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Wed, 27 Nov 2024 08:12:25 +0000 Subject: [PATCH 37/38] even less granular --- .../Services/UserManagement/SystemAccounts.cs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index 98c8f5e08a9..a9d155b4db3 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -11,15 +11,17 @@ public class SystemAccounts { public static readonly ClaimsPrincipal System = new ClaimsPrincipal(new ClaimsIdentity(Claims, "system")); public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); - public static readonly string 
SystemChaserName = "system-chaser"; + // we may want to make the granularity here configurable, but as a starting point we only + separate scavenge, because it's the only part the user has direct control over + public static readonly string SystemChaserName = "system"; public static readonly string SystemEpochManagerName = "system"; public static readonly string SystemName = "system"; public static readonly string SystemIndexCommitterName = "system"; - public static readonly string SystemPersistentSubscriptionsName = "system-persistent-subscriptions"; + public static readonly string SystemPersistentSubscriptionsName = "system"; public static readonly string SystemRedactionName = "system"; - public static readonly string SystemReplicationName = "system-replication"; + public static readonly string SystemReplicationName = "system"; public static readonly string SystemScavengeName = "system-scavenge"; - public static readonly string SystemSubscriptionsName = "system-subscriptions"; + public static readonly string SystemSubscriptionsName = "system"; public static readonly string SystemTelemetryName = "system"; public static readonly string SystemWriterName = "system"; } From 1b30a3a3a710683bcafde4ff6429442e96ffaa42 Mon Sep 17 00:00:00 2001 From: Timothy Coleman Date: Wed, 27 Nov 2024 11:06:51 +0000 Subject: [PATCH 38/38] better way of removing archive --- .../TransactionLog/Chunks/TFChunkTrackerTests.cs | 4 ---- .../TransactionLog/Chunks/TransactionFileTracker.cs | 11 ++++++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs index 8c6140d1509..f830f0765ab 100644 --- a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs @@ -82,10 +82,6 @@ private void AssertMeasurements(string instrumentName, 
long? expectedValue) { AssertTags(m.Tags, "unknown"); Assert.Equal(0, m.Value); }, - m => { - AssertTags(m.Tags, "archive"); - Assert.Equal(0, m.Value); - }, m => { AssertTags(m.Tags, "chunk-cache"); Assert.Equal(0, m.Value); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 42f209c9db7..3df289cb1fb 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -11,18 +11,23 @@ public class TFChunkTracker : ITransactionFileTracker { public TFChunkTracker(CounterMetric eventMetric, CounterMetric byteMetric, string user) { _subMetrics = new (CounterSubMetric, CounterSubMetric)[(int)(ITransactionFileTracker.Source.EnumLength)]; + + var unknownEvents = CreateSubMetric(eventMetric, "unknown", user); + var unknownBytes = CreateSubMetric(byteMetric, "unknown", user); + for (var i = 0; i < _subMetrics.Length; i++) { var sourceName = NameOf((ITransactionFileTracker.Source)i); + var isUnknown = string.IsNullOrWhiteSpace(sourceName); _subMetrics[i] = ( - Events: CreateSubMetric(eventMetric, sourceName, user), - Bytes: CreateSubMetric(byteMetric, sourceName, user)); + Events: isUnknown ? unknownEvents : CreateSubMetric(eventMetric, sourceName, user), + Bytes: isUnknown ? unknownBytes : CreateSubMetric(byteMetric, sourceName, user)); } } static string NameOf(ITransactionFileTracker.Source source) => source switch { ITransactionFileTracker.Source.ChunkCache => "chunk-cache", ITransactionFileTracker.Source.File => "file", - _ => "unknown", + _ => "", }; static CounterSubMetric CreateSubMetric(CounterMetric metric, string source, string user) {