diff --git a/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs b/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs
index 37e0c18edaf..aa097f9c643 100644
--- a/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs
+++ b/src/EventStore.Core.Tests/Fakes/FakeTfReader.cs
@@ -7,19 +7,19 @@ public void Reposition(long position) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadNext() {
+		public SeqReadResult TryReadNext(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadPrev() {
+		public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public RecordReadResult TryReadAt(long position, bool couldBeScavenged) {
+		public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public bool ExistsAt(long position) {
+		public bool ExistsAt(long position, ITransactionFileTracker tracker) {
 			return true;
 		}
 	}
diff --git a/src/EventStore.Core.Tests/IIndexReaderExtensions.cs b/src/EventStore.Core.Tests/IIndexReaderExtensions.cs
index 08bc0b89a73..ce6bead5c44 100644
--- a/src/EventStore.Core.Tests/IIndexReaderExtensions.cs
+++ b/src/EventStore.Core.Tests/IIndexReaderExtensions.cs
@@ -1,14 +1,15 @@
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 
 namespace EventStore.Core.Tests {
 	public static class IIndexReaderExtensions {
 		public static IndexReadEventResult ReadEvent(this IIndexReader index, string streamName, long eventNumber) =>
-			index.ReadEvent(streamName, streamName, eventNumber);
+			index.ReadEvent(streamName, streamName, eventNumber, ITransactionFileTracker.NoOp);
 
 		public static IndexReadStreamResult ReadStreamEventsBackward(this IIndexReader index, string streamName, long fromEventNumber, int maxCount) =>
-			index.ReadStreamEventsBackward(streamName, streamName, fromEventNumber, maxCount);
+			index.ReadStreamEventsBackward(streamName, streamName, fromEventNumber, maxCount, ITransactionFileTracker.NoOp);
 
 		public static IndexReadStreamResult ReadStreamEventsForward(this IIndexReader index, string streamName, long fromEventNumber, int maxCount) =>
-			index.ReadStreamEventsForward(streamName, streamName, fromEventNumber, maxCount);
+			index.ReadStreamEventsForward(streamName, streamName, fromEventNumber, maxCount, ITransactionFileTracker.NoOp);
 	}
 }
diff --git a/src/EventStore.Core.Tests/Index/FakeIndexReader.cs b/src/EventStore.Core.Tests/Index/FakeIndexReader.cs
index 35e201601bc..4360cce3a82 100644
--- a/src/EventStore.Core.Tests/Index/FakeIndexReader.cs
+++ b/src/EventStore.Core.Tests/Index/FakeIndexReader.cs
@@ -14,22 +14,22 @@ public void Reposition(long position) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadNext() {
+		public SeqReadResult TryReadNext(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadPrev() {
+		public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public RecordReadResult TryReadAt(long position, bool couldBeScavenged) {
+		public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) {
 			var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0, position.ToString(), null, -1,
 				DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null);
 			return new RecordReadResult(true, position + 1, record, 1);
 		}
 
-		public bool ExistsAt(long position) {
+		public bool ExistsAt(long position, ITransactionFileTracker tracker) {
 			return _existsAt(position);
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs
index 7c6012bfd7e..85cf52d90cf 100644
--- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_range_query.cs
@@ -34,7 +34,7 @@ public override async Task TestFixtureSetUp() {
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(PathName, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(version: _ptableVersion, maxSize: 40),
-				() => { throw new InvalidOperationException(); },
+				_ => { throw new InvalidOperationException(); },
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 5,
diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs
index e04e5a97e9b..c4fbc8d4616 100644
--- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_on_try_get_one_value_query.cs
@@ -33,12 +33,12 @@ public override async Task TestFixtureSetUp() {
 			await base.TestFixtureSetUp();
 
 			_indexDir = PathName;
-			var fakeReader = new TFReaderLease(new FakeTfReader());
+			var fakeReader = new TFReaderLease(new FakeTfReader(), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(_ptableVersion, maxSize: 10),
-				() => fakeReader,
+				_ => fakeReader,
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 5,
diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs
index c055116bbd5..375daa85928 100644
--- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_should.cs
@@ -30,7 +30,7 @@ public override async Task TestFixtureSetUp() {
 			var highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(PathName, lowHasher, highHasher, "",
 				() => new HashListMemTable(_ptableVersion, maxSize: 20),
-				() => { throw new InvalidOperationException(); },
+				_ => { throw new InvalidOperationException(); },
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 10,
diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs
index 990f40f2a18..49736f58275 100644
--- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_corrupt_index_entries_should.cs
@@ -22,11 +22,11 @@ public void ConstructTableIndexWithCorruptIndexEntries(byte version, bool skipIn
 			bool createForceVerifyFile = false) {
 			var lowHasher = new XXHashUnsafe();
 			var highHasher = new Murmur3AUnsafe();
-			var fakeReader = new TFReaderLease(new FakeIndexReader());
+			var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp);
 			_tableIndex = new TableIndex(PathName, lowHasher, highHasher, "",
 				() => new HashListMemTable(version, maxSize: NumIndexEntries),
-				() => fakeReader,
+				_ => fakeReader,
 				version, int.MaxValue,
 				Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: NumIndexEntries,
@@ -67,7 +67,7 @@ public void ConstructTableIndexWithCorruptIndexEntries(byte version, bool skipIn
 			//load table index again
 			_tableIndex = new TableIndex(PathName, lowHasher, highHasher, "",
 				() => new HashListMemTable(version, maxSize: NumIndexEntries),
-				() => fakeReader,
+				_ => fakeReader,
 				version, int.MaxValue,
 				Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: NumIndexEntries,
diff --git a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs
index 7cace1e5706..3403b7c2302 100644
--- a/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV1/table_index_with_two_ptables_and_memtable_on_range_query.cs
@@ -34,12 +34,12 @@ public override async Task TestFixtureSetUp() {
 			await base.TestFixtureSetUp();
 
 			_indexDir = PathName;
-			var fakeReader = new TFReaderLease(new FakeIndexReader());
+			var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp);
 			_lowHasher = new FakeIndexHasher();
 			_highHasher = new FakeIndexHasher();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(_ptableVersion, maxSize: 10),
-				() => fakeReader,
+				_ => fakeReader,
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
diff --git a/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs b/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs
index fb8ca60cd78..e6c42a3c695 100644
--- a/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV2/table_index_hash_collision_when_upgrading_to_64bit.cs
@@ -34,12 +34,12 @@ public override async Task TestFixtureSetUp() {
 			await base.TestFixtureSetUp();
 
 			_indexDir = PathName;
-			var fakeReader = new TFReaderLease(new FakeIndexReader());
+			var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV1, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV1, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 5 + _extraStreamHashesAtBeginning + _extraStreamHashesAtEnd,
@@ -66,7 +66,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(_ptableVersion, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 5,
@@ -150,22 +150,22 @@ public void Reposition(long position) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadNext() {
+		public SeqReadResult TryReadNext(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadPrev() {
+		public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public RecordReadResult TryReadAt(long position, bool couldBeScavenged) {
+		public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) {
 			var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0,
 				position % 2 == 0 ? "account--696193173" : "LPN-FC002_LPK51001", null, -1,
 				DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null);
 			return new RecordReadResult(true, position + 1, record, 1);
 		}
 
-		public bool ExistsAt(long position) {
+		public bool ExistsAt(long position, ITransactionFileTracker tracker) {
 			return true;
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs b/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs
index 90693eedd6c..72271f2907f 100644
--- a/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV2/table_index_when_merging_upgrading_to_64bit_if_entry_doesnt_exist_drops_entry_and_carries_on.cs
@@ -58,10 +58,10 @@ public override async Task TestFixtureSetUp() {
 			var emptyStreamId = LogFormatHelper.EmptyStreamId;
 
 			_indexDir = PathName;
-			var fakeReader = new TFReaderLease(new FakeIndexReader2());
+			var fakeReader = new TFReaderLease(new FakeIndexReader2(), ITransactionFileTracker.NoOp);
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, emptyStreamId,
 				() => new HashListMemTable(PTableVersions.IndexV1, maxSize: 3),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV1, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 3,
@@ -76,7 +76,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, emptyStreamId,
 				() => new HashListMemTable(_ptableVersion, maxSize: 3),
-				() => fakeReader,
+				_ => fakeReader,
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 3,
@@ -139,15 +139,15 @@ public void Reposition(long position) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadNext() {
+		public SeqReadResult TryReadNext(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadPrev() {
+		public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public RecordReadResult TryReadAt(long position, bool couldBeScavenged) {
+		public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) {
 			TStreamId streamId;
 			switch (position) {
 				case 1:
@@ -170,7 +170,7 @@ public RecordReadResult TryReadAt(long position, bool couldBeScavenged) {
 			return new RecordReadResult(true, position + 1, record, 1);
 		}
 
-		public bool ExistsAt(long position) {
+		public bool ExistsAt(long position, ITransactionFileTracker tracker) {
 			return position != 2 && position != 1;
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs b/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs
index 330385e3214..78822478c04 100644
--- a/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs
+++ b/src/EventStore.Core.Tests/Index/IndexV3/when_upgrading_index_to_64bit_stream_version.cs
@@ -25,12 +25,12 @@ public override async Task TestFixtureSetUp() {
 			await base.TestFixtureSetUp();
 
 			_indexDir = PathName;
-			var fakeReader = new TFReaderLease(new FakeIndexReader());
+			var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV2, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 5,
@@ -47,7 +47,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(_ptableVersion, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				_ptableVersion, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 5,
@@ -131,22 +131,22 @@ public void Reposition(long position) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadNext() {
+		public SeqReadResult TryReadNext(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public SeqReadResult TryReadPrev() {
+		public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) {
 			throw new NotImplementedException();
 		}
 
-		public RecordReadResult TryReadAt(long position, bool couldBeScavenged) {
+		public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) {
 			var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0,
 				position % 2 == 0 ? "testStream-2" : "testStream-1", null, -1,
 				DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null);
 			return new RecordReadResult(true, position + 1, record, 1);
 		}
 
-		public bool ExistsAt(long position) {
+		public bool ExistsAt(long position, ITransactionFileTracker tracker) {
 			return true;
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs
index e84ec36383a..96d43259b24 100644
--- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs
+++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index.cs
@@ -33,13 +33,13 @@ public override async Task TestFixtureSetUp() {
 			_indexDir = PathName;
 
-			var fakeReader = new TFReaderLease(new FakeIndexReader(l => !Deleted.Contains(l)));
+			var fakeReader = new TFReaderLease(new FakeIndexReader(l => !Deleted.Contains(l)), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
@@ -63,7 +63,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs
index ddb006ded42..d9f90b591a7 100644
--- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs
+++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_and_another_table_is_completed_during.cs
@@ -40,13 +40,13 @@ public override async Task TestFixtureSetUp() {
 					if (!scavengeBlocker.Wait(5000))
 						throw new Exception("Failed to continue.");
 					return false;
-				}));
+				}), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
@@ -79,7 +79,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs
index 0249e6a18bc..ec8f282c24f 100644
--- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs
+++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_scavenging_table.cs
@@ -29,13 +29,13 @@ public override async Task TestFixtureSetUp() {
 			var fakeReader = new TFReaderLease(new FakeIndexReader(l => {
 				cancellationTokenSource.Cancel();
 				return true;
-			}));
+			}), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
@@ -60,7 +60,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs
index 4391e2fcbd8..2a35c60e059 100644
--- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs
+++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_cancelled_while_waiting_for_lock.cs
@@ -24,12 +24,12 @@ public override async Task TestFixtureSetUp() {
 			_indexDir = PathName;
 
-			var fakeReader = new TFReaderLease(new FakeIndexReader());
+			var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
@@ -56,7 +56,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
diff --git a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs
index e7fdd5bd96e..98051a90578 100644
--- a/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs
+++ b/src/EventStore.Core.Tests/Index/Scavenge/when_scavenging_a_table_index_fails.cs
@@ -31,12 +31,12 @@ public override async Task TestFixtureSetUp() {
 			_indexDir = PathName;
 
-			var fakeReader = new TFReaderLease(new FakeIndexReader());
+			var fakeReader = new TFReaderLease(new FakeIndexReader(), ITransactionFileTracker.NoOp);
 			_lowHasher = new XXHashUnsafe();
 			_highHasher = new Murmur3AUnsafe();
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => throw new Exception("Expected exception") /* throw an exception when the first PTable scavenge starts and tries to acquire a reader */,
+				_ => throw new Exception("Expected exception") /* throw an exception when the first PTable scavenge starts and tries to acquire a reader */,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
@@ -60,7 +60,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "",
 				() => new HashListMemTable(PTableVersions.IndexV4, maxSize: 5),
-				() => fakeReader,
+				_ => fakeReader,
 				PTableVersions.IndexV4, 5, Constants.PTableMaxReaderCountDefault,
 				maxSizeForMemory: 2,
diff --git a/src/EventStore.Core.Tests/ReadIndexExtensions.cs b/src/EventStore.Core.Tests/ReadIndexExtensions.cs
index 4ac6bd96823..68e8b90bc37 100644
--- a/src/EventStore.Core.Tests/ReadIndexExtensions.cs
+++ b/src/EventStore.Core.Tests/ReadIndexExtensions.cs
@@ -3,6 +3,7 @@
 using EventStore.Core.Data;
 using EventStore.Core.Services;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 
 namespace EventStore.Core.Tests {
 	// Extensions to perform streamlookups inline
@@ -10,32 +11,32 @@ namespace EventStore.Core.Tests {
 	public static class IReadIndexExtensions {
 		public static bool IsStreamDeleted(this IReadIndex self, string streamName) {
 			var streamId = self.GetStreamId(streamName);
-			return self.IsStreamDeleted(streamId);
+			return self.IsStreamDeleted(streamId, ITransactionFileTracker.NoOp);
 		}
 
 		public static long GetStreamLastEventNumber(this IReadIndex self, string streamName) {
 			var streamId = self.GetStreamId(streamName);
-			return self.GetStreamLastEventNumber(streamId);
+			return self.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp);
 		}
 
 		public static IndexReadEventResult ReadEvent(this IReadIndex self, string streamName, long eventNumber) {
 			var streamId = self.GetStreamId(streamName);
-			return self.ReadEvent(streamName, streamId, eventNumber);
+			return self.ReadEvent(streamName, streamId, eventNumber, ITransactionFileTracker.NoOp);
 		}
 
 		public static IndexReadStreamResult ReadStreamEventsForward(this IReadIndex self, string streamName, long fromEventNumber, int maxCount) {
 			var streamId = self.GetStreamId(streamName);
-			return self.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount);
+			return self.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp);
 		}
 
 		public static IndexReadStreamResult ReadStreamEventsBackward(this IReadIndex self, string streamName, long fromEventNumber, int maxCount) {
 			var streamId = self.GetStreamId(streamName);
-			return self.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount);
+			return self.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount, ITransactionFileTracker.NoOp);
 		}
 
 		public static StreamMetadata GetStreamMetadata(this IReadIndex self, string streamName) {
 			var streamId = self.GetStreamId(streamName);
-			return self.GetStreamMetadata(streamId);
+			return self.GetStreamMetadata(streamId, ITransactionFileTracker.NoOp);
 		}
 
 		public static List EventRecords(this IndexReadAllResult result) {
diff --git a/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs b/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs
index 936780a8f26..24dca736e20 100644
--- a/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs
+++ b/src/EventStore.Core.Tests/Services/PersistentSubscription/PersistentSubscriptionTests.cs
@@ -26,6 +26,7 @@
 using EventStore.Core.Tests.TransactionLog;
 using EventFilter = EventStore.Core.Services.Storage.ReaderIndex.EventFilter;
 using StreamMetadata = EventStore.Core.Data.StreamMetadata;
+using EventStore.Core.TransactionLog;
 
 namespace EventStore.Core.Tests.Services.PersistentSubscription {
 	public enum EventSource {
@@ -189,7 +190,8 @@ public when_updating_all_stream_subscription_with_filter() {
 				new FakeReadIndex(_ => false, new MetaStreamLookup()), ioDispatcher, bus,
 				new PersistentSubscriptionConsumerStrategyRegistry(bus, bus,
-					Array.Empty()));
+					Array.Empty()),
+				ITransactionFileTrackerFactory.NoOp);
 			_sut.Start();
 			_sut.Handle(new SystemMessage.BecomeLeader(correlationId: Guid.NewGuid()));
diff --git a/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs b/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs
index 09d30b6e77d..ae6130816d9 100644
--- a/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs
+++ b/src/EventStore.Core.Tests/Services/PersistentSubscription/when_an_error_occurs.cs
@@ -10,6 +10,7 @@
 using EventStore.Core.Services.PersistentSubscription;
 using EventStore.Core.Services.PersistentSubscription.ConsumerStrategy;
 using EventStore.Core.Tests.TransactionLog;
+using EventStore.Core.TransactionLog;
 using NUnit.Framework;
 
 namespace EventStore.Core.Tests.Services.PersistentSubscription {
@@ -34,7 +35,8 @@ protected when_an_error_occurs(TResult expectedResult) {
 				new FakeReadIndex(_ => false, new MetaStreamLookup()),
 				new IODispatcher(bus, new PublishEnvelope(bus)), bus,
 				new PersistentSubscriptionConsumerStrategyRegistry(bus, bus,
-					Array.Empty()));
+					Array.Empty()),
+				ITransactionFileTrackerFactory.NoOp);
 			_envelope = new CallbackEnvelope(_replySource.SetResult);
 			_sut.Start();
 		}
diff --git a/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs b/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs
index 2a8375b1256..0ba536fb3e7 100644
--- a/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs
+++ b/src/EventStore.Core.Tests/Services/RedactionService/EventPositionTests.cs
@@ -4,6 +4,7 @@
 using EventStore.Core.Data.Redaction;
 using EventStore.Core.Messages;
 using EventStore.Core.Messaging;
+using EventStore.Core.TransactionLog;
 using FluentAssertions;
 using NUnit.Framework;
 
@@ -21,7 +22,7 @@ private void WriteEvent(string streamId, long eventNumber, string data) {
 			_positions[eventNumber] = new();
 
 			var chunk = Db.Manager.GetChunkFor(eventRecord.LogPosition);
-			var eventOffset = chunk.GetActualRawPosition(eventRecord.LogPosition);
+			var eventOffset = chunk.GetActualRawPosition(eventRecord.LogPosition, ITransactionFileTracker.NoOp);
 			var eventPosition = new EventPosition(
 				eventRecord.LogPosition, Path.GetFileName(chunk.FileName), chunk.ChunkHeader.Version,
 				chunk.IsReadOnly, (uint)eventOffset);
 			_positions[eventNumber].Add(eventPosition);
diff --git a/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs b/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs
index 77af5849b84..0a6acd816be 100644
--- a/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs
+++ b/src/EventStore.Core.Tests/Services/RedactionService/RedactionServiceTestFixture.cs
@@ -3,6 +3,7 @@
 using EventStore.Core.Synchronization;
 using EventStore.Core.Tests.Bus;
 using EventStore.Core.Tests.Services.Storage;
+using EventStore.Core.TransactionLog;
 using NUnit.Framework;
 
 namespace EventStore.Core.Tests.Services.RedactionService {
@@ -16,7 +17,8 @@ public RedactionServiceTestFixture() : base(chunkSize: 1024) { }
 		[SetUp]
 		public virtual Task SetUp() {
 			_switchChunksLock = new SemaphoreSlimLock();
-			RedactionService = new RedactionService(new FakeQueuedHandler(), Db, ReadIndex, _switchChunksLock);
+			RedactionService = new RedactionService(new FakeQueuedHandler(), Db, ReadIndex, _switchChunksLock,
+				ITransactionFileTracker.NoOp);
 			return Task.CompletedTask;
 		}
diff --git a/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs b/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs
index 3b8e2b06e3d..633970a61cf 100644
--- a/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs
+++ b/src/EventStore.Core.Tests/Services/RedactionService/SwitchChunkFailureTests.cs
@@ -2,6 +2,7 @@
 using System.IO;
 using System.Threading.Tasks;
 using EventStore.Core.Data.Redaction;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.Chunks;
 using EventStore.Core.TransactionLog.Chunks.TFChunk;
 using NUnit.Framework;
@@ -131,8 +132,7 @@ public async Task cannot_switch_with_chunk_having_mismatched_range() {
 			newChunk = $"{nameof(cannot_switch_with_chunk_having_mismatched_range)}-chunk-0-2.tmp";
 			var chunkHeader = new ChunkHeader(1, 1024, 0, 2, true, Guid.NewGuid());
-			var chunk = TFChunk.CreateWithHeader(Path.Combine(PathName, newChunk), chunkHeader, 1024, false, false, false, 1, 1, false,
-				new TFChunkTracker.NoOp());
+			var chunk = TFChunk.CreateWithHeader(Path.Combine(PathName, newChunk), chunkHeader, 1024, false, false, false, 1, 1, false, ITransactionFileTracker.NoOp);
 			chunk.Dispose();
 
 			msg = await SwitchChunk(GetChunk(0, 0), newChunk);
 			Assert.AreEqual(SwitchChunkResult.ChunkRangeDoesNotMatch, msg.Result);
diff --git a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs
index 661c67ebcf9..5861801010f 100644
--- a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs
+++ b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service.cs
@@ -14,6 +14,7 @@
 using EventStore.Core.Tests.Helpers;
 using EventStore.Core.Tests.Services.ElectionsService;
 using EventStore.Core.Tests.Services.Transport.Tcp;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.Checkpoint;
 using EventStore.Core.TransactionLog.Chunks;
 using EventStore.Core.TransactionLog.FileNamingStrategy;
@@ -68,6 +69,7 @@ public override async Task TestFixtureSetUp() {
 				epochManager: new FakeEpochManager(),
 				clusterSize: ClusterSize,
 				unsafeAllowSurplusNodes: false,
+				tfTracker: ITransactionFileTracker.NoOp,
 				queueStatsManager: new QueueStatsManager());
 
 			Service.Handle(new SystemMessage.SystemStart());
diff --git a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs
index 3ad35d4a4ab..b6569c6bb2a 100644
--- a/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs
+++ b/src/EventStore.Core.Tests/Services/Replication/LeaderReplication/with_replication_service_and_epoch_manager.cs
@@ -17,6 +17,7 @@
 using EventStore.Core.Tests.Authorization;
 using EventStore.Core.Tests.Helpers;
 using EventStore.Core.Tests.Services.Transport.Tcp;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.Checkpoint;
 using EventStore.Core.TransactionLog.Chunks;
 using EventStore.Core.TransactionLog.FileNamingStrategy;
@@ -74,6 +75,7 @@ public override async Task TestFixtureSetUp() {
 				_logFormat.CreatePartitionManager(
 					reader: new TFChunkReader(Db, Db.Config.WriterCheckpoint),
 					writer: Writer),
+				ITransactionFileTrackerFactory.NoOp,
 				Guid.NewGuid());
 
 			Service = new LeaderReplicationService(
 				Publisher,
@@ -83,6 +85,7 @@ public override async Task TestFixtureSetUp() {
 				EpochManager,
 				ClusterSize,
 				false,
+				ITransactionFileTracker.NoOp,
 				new QueueStatsManager());
 
 			Service.Handle(new SystemMessage.SystemStart());
diff --git a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs
index a4e0728aeb7..3a0e8b6d91f 100644
--- a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs
+++ b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationFixture.cs
@@ -20,6 +20,7 @@
 using EventStore.Core.Tests.Services.Storage;
 using EventStore.Core.Tests.Services.Storage.ReadIndex;
 using EventStore.Core.Time;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.Checkpoint;
 using EventStore.Core.TransactionLog.Chunks;
 using EventStore.Core.TransactionLog.FileNamingStrategy;
@@ -149,6 +150,7 @@ private LeaderInfo CreateLeader(TFChunkDb db) {
 			epochManager: epochManager,
 			clusterSize: ClusterSize,
 			unsafeAllowSurplusNodes: false,
+			tfTracker: ITransactionFileTracker.NoOp,
 			queueStatsManager: new QueueStatsManager());
 
 		var tcpSendService = new TcpSendService();
diff --git a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs
index 10fe3d58eee..61b8ef86179 100644
--- a/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs
+++ b/src/EventStore.Core.Tests/Services/Replication/LogReplication/LogReplicationWithExistingDbFixture.cs
@@ -2,6 +2,7 @@
 using System.Collections.Generic;
 using System.Threading.Tasks;
 using EventStore.Core.LogAbstraction;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.Chunks;
 using EventStore.Core.TransactionLog.Chunks.TFChunk;
 using EventStore.Core.TransactionLog.LogRecords;
@@ -47,7 +48,7 @@ protected static Task CreateChunk(TFChunkDb db, bool raw, bool complete, int chu
 			initialReaderCount: db.Config.InitialReaderCount,
 			maxReaderCount: db.Config.MaxReaderCount,
 			reduceFileCachePressure: db.Config.ReduceFileCachePressure,
-			tracker: new TFChunkTracker.NoOp());
+			tracker: ITransactionFileTracker.NoOp);
 
 		var posMaps = new List();
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs
index 8e7e2a16992..c19df8e45bd 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_before_the_transaction_is_present.cs
@@ -1,4 +1,5 @@
 using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 
@@ -17,7 +18,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p
 				Rec.Prepare(3, "single_write_stream_id_3", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted),
 				Rec.Prepare(4, "single_write_stream_id_4", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted));
 
-			var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10);
+			var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10, ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(4, firstRead.Records.Count);
 			Assert.AreEqual("single_write_stream_id_1", firstRead.Records[0].Event.EventStreamId);
@@ -35,7 +36,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p
 				Rec.Prepare(4, "single_write_stream_id_4", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted),
 				Rec.Commit(1, "transaction_stream_id"));
 
-			var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10);
+			var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10, ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(1, transactionRead.Records.Count);
 			Assert.AreEqual("transaction_stream_id", transactionRead.Records[0].Event.EventStreamId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs
index 0bf02d98f7c..32c672be1c2 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_a_single_write_is_after_transaction_end_but_before_commit_is_present.cs
@@ -1,4 +1,5 @@
 using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 
@@ -14,7 +15,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p
 				Rec.TransEnd(0, "transaction_stream_id"),
 				Rec.Prepare(1, "single_write_stream_id", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted));
 
-			var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10);
+			var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10, ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(1, firstRead.Records.Count);
 			Assert.AreEqual("single_write_stream_id", firstRead.Records[0].Event.EventStreamId);
@@ -25,7 +26,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p
 				Rec.Prepare(1, "single_write_stream_id", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted),
 				Rec.Commit(0, "transaction_stream_id"));
 
-			var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10);
+			var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10, ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(1, transactionRead.Records.Count);
 			Assert.AreEqual("transaction_stream_id", transactionRead.Records[0].Event.EventStreamId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs
index 921ae0429bd..78f0b53d028 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_multiple_single_writes_are_after_transaction_end_but_before_commit_is_present.cs
@@ -1,4 +1,5 @@
 using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 
@@ -22,7 +23,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p
 				Rec.Prepare(2, "single_write_stream_id_2", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted),
 				Rec.Prepare(3, "single_write_stream_id_3", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted));
 
-			var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10);
+			var firstRead = ReadIndex.ReadAllEventsForward(new Data.TFPos(0, 0), 10, ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(3, firstRead.Records.Count);
 			Assert.AreEqual("single_write_stream_id_1", firstRead.Records[0].Event.EventStreamId);
@@ -38,7 +39,7 @@ public void should_be_able_to_read_the_transactional_writes_when_the_commit_is_p
 				Rec.Prepare(3, "single_write_stream_id_3", prepareFlags: PrepareFlags.SingleWrite | PrepareFlags.IsCommitted),
 				Rec.Commit(0, "transaction_stream_id"));
 
-			var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10);
+			var transactionRead = ReadIndex.ReadAllEventsForward(firstRead.NextPos, 10, ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(1, transactionRead.Records.Count);
 			Assert.AreEqual("transaction_stream_id", transactionRead.Records[0].Event.EventStreamId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs
index cb7725d6e85..2dc9e9e2470 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_disallowed_streams.cs
@@ -4,6 +4,7 @@
 using NUnit.Framework;
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 
 namespace EventStore.Core.Tests.Services.Storage.AllReader {
 
@@ -36,7 +37,7 @@ protected override void WriteTestScenario() {
 		[Test]
 		public void should_filter_out_disallowed_streams_when_reading_events_forward() {
-			var records = ReadIndex.ReadAllEventsForward(_forwardReadPos, 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsForward(_forwardReadPos, 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(2, records.Count);
 			Assert.True(records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(records.Any(x => x.Event.EventStreamId == _allowedStream1));
@@ -50,7 +51,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit
 				Filter.Types.FilterType.Prefix, new[] {"event-type"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1));
@@ -64,7 +65,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit
 				Filter.Types.FilterType.Regex, new[] {@"^.*event-type-.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1));
@@ -78,7 +79,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit
 				Filter.Types.FilterType.Prefix, new[] {"$persistentsubscripti"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2));
@@ -91,7 +92,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit
 				Filter.Types.FilterType.Regex, new[] {@"^.*istentsubsc.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2));
@@ -99,7 +100,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_forward_wit
 		[Test]
 		public void should_filter_out_disallowed_streams_when_reading_events_backward() {
-			var records = ReadIndex.ReadAllEventsBackward(_backwardReadPos, 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsBackward(_backwardReadPos, 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(2, records.Count);
 			Assert.True(records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(records.Any(x => x.Event.EventStreamId == _allowedStream1));
@@ -113,7 +114,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi
 				Filter.Types.FilterType.Prefix, new[] {"event-type"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1));
@@ -127,7 +128,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi
 				Filter.Types.FilterType.Regex, new[] {@"^.*event-type-.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream1));
@@ -141,7 +142,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi
 				Filter.Types.FilterType.Prefix, new[] {"$persistentsubscripti"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2));
@@ -154,7 +155,7 @@ public void should_filter_out_disallowed_streams_when_reading_events_backward_wi
 				Filter.Types.FilterType.Regex, new[] {@"^.*istentsubsc.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 			Assert.True(result.Records.All(x => x.Event.EventStreamId != _disallowedStream));
 			Assert.True(result.Records.Any(x => x.Event.EventStreamId == _allowedStream2));
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs
index e6e3eeddc0a..80d747784ff 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering.cs
@@ -4,6 +4,7 @@
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 
 namespace EventStore.Core.Tests.Services.Storage.AllReader {
 
@@ -34,7 +35,7 @@ public void should_read_only_events_forward_with_event_type_prefix() {
 				Filter.Types.FilterType.Prefix, new[] {"event-type"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 		}
@@ -45,7 +46,7 @@ public void should_read_only_events_forward_with_event_type_regex() {
 				Filter.Types.FilterType.Regex, new[] {@"^.*other-event.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 		}
@@ -56,7 +57,7 @@ public void should_read_only_events_forward_with_stream_id_prefix() {
 				Filter.Types.FilterType.Prefix, new[] {"ES2"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 		}
@@ -67,7 +68,7 @@ public void should_read_only_events_forward_with_stream_id_regex() {
 				Filter.Types.FilterType.Regex, new[] {@"^.*ES2.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsForwardFiltered(_forwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 		}
@@ -78,7 +79,7 @@ public void should_read_only_events_backward_with_event_type_prefix() {
 				Filter.Types.FilterType.Prefix, new[] {"event-type"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 		}
@@ -89,7 +90,7 @@ public void should_read_only_events_backward_with_event_type_regex() {
 				Filter.Types.FilterType.Regex, new[] {@"^.*other-event.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(2, result.Records.Count);
 		}
@@ -100,7 +101,7 @@ public void should_read_only_events_backward_with_stream_id_prefix() {
 				Filter.Types.FilterType.Prefix, new[] {"ES2"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 		}
@@ -111,7 +112,7 @@ public void should_read_only_events_backward_with_stream_id_regex() {
 				Filter.Types.FilterType.Regex, new[] {@"^.*ES2.*$"});
 			var eventFilter = EventFilter.Get(true, filter);
-			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter);
+			var result = ReadIndex.ReadAllEventsBackwardFiltered(_backwardReadPos, 10, 10, eventFilter, ITransactionFileTracker.NoOp);
 			Assert.AreEqual(1, result.Records.Count);
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs
index 2e74ce4d2d9..6e95ed6abfc 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_filtering_and_transactions.cs
@@ -3,6 +3,7 @@
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
 using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers;
+using EventStore.Core.TransactionLog;
 using NUnit.Framework;
 
 namespace EventStore.Core.Tests.Services.Storage.AllReader {
@@ -43,7 +44,8 @@ static Rec[] ExplicitTransaction(int transaction, string stream) => new[] {
 				pos: new Data.TFPos(0, 0),
 				maxCount: 10,
 				maxSearchWindow: int.MaxValue,
-				eventFilter: EventFilter.StreamName.Prefixes(false, "included"));
+				eventFilter: EventFilter.StreamName.Prefixes(false, "included"),
+				tracker: ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(10, read.Records.Count);
 			for (int j = 0; j < 10; j++)
@@ -83,7 +85,8 @@ static Rec[] ExplicitTransaction(int transaction, string stream) => new[] {
 				pos: new TFPos(writerCp, writerCp),
 				maxCount: 10,
 				maxSearchWindow: int.MaxValue,
-				eventFilter: EventFilter.StreamName.Prefixes(false, "included"));
+				eventFilter: EventFilter.StreamName.Prefixes(false, "included"),
+				tracker: ITransactionFileTracker.NoOp);
 
 			Assert.AreEqual(10, read.Records.Count);
 			for (int j = 9; j <= 0; j--)
diff --git a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs
index ed05a3a0548..dea2172c56a 100644
--- a/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/AllReader/when_reading_all_with_last_indexed_set.cs
@@ -2,6 +2,7 @@
 using NUnit.Framework;
 using EventStore.Core.Data;
 using EventStore.Core.TransactionLog.LogRecords;
+using EventStore.Core.TransactionLog;
 
 namespace EventStore.Core.Tests.Services.Storage.AllReader {
 	[TestFixture(typeof(LogFormat.V2), typeof(string))]
@@ -18,13 +19,13 @@ protected override void WriteTestScenario() {
 		public void should_be_able_to_read_all_backwards() {
 			var checkpoint = WriterCheckpoint.Read();
 			var pos = new TFPos(checkpoint, checkpoint);
-			var result = ReadIndex.ReadAllEventsBackward(pos, 10).EventRecords();
+			var result = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(3, result.Count);
 		}
 
 		[Test]
 		public void should_be_able_to_read_all_forwards() {
-			var result = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords();
+			var result = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(3, result.Count);
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs
index 310650e99cd..2faa5b19b38 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_duplicate_events_in_a_stream.cs
@@ -143,7 +143,7 @@ public override async Task TestFixtureSetUp() {
 			var emptyStreamId = _logFormat.EmptyStreamId;
 			_tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId,
 				() => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
-				() => new TFReaderLease(readers),
+				_ => new TFReaderLease(readers, ITransactionFileTracker.NoOp),
 				IndexBitnessVersion,
 				int.MaxValue,
 				Constants.PTableMaxReaderCountDefault,
@@ -172,6 +172,7 @@ public override async Task TestFixtureSetUp() {
 				indexCheckpoint: _db.Config.IndexCheckpoint,
 				indexStatusTracker: new IndexStatusTracker.NoOp(),
 				indexTracker: new IndexTracker.NoOp(),
+				tfTrackers: ITransactionFileTrackerFactory.NoOp,
 				cacheTracker: new CacheHitsMissesTracker.NoOp());
 
@@ -192,7 +193,7 @@ public override async Task TestFixtureSetUp() {
 			_tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId,
 				() => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2),
-				() => new TFReaderLease(readers),
+				_ => new TFReaderLease(readers, ITransactionFileTracker.NoOp),
 				IndexBitnessVersion,
 				int.MaxValue,
 				Constants.PTableMaxReaderCountDefault,
@@ -220,6 +221,7 @@ public override async Task TestFixtureSetUp() {
 				indexCheckpoint: _db.Config.IndexCheckpoint,
 				indexStatusTracker: new IndexStatusTracker.NoOp(),
 				indexTracker: new IndexTracker.NoOp(),
+				tfTrackers: ITransactionFileTrackerFactory.NoOp,
 				cacheTracker: new CacheHitsMissesTracker.NoOp());
 
 			readIndex.IndexCommitter.Init(chaserCheckpoint.Read());
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs
index a2175b2592e..40b443f8995 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_multiple_events_in_a_stream.cs
@@ -1,6 +1,7 @@
 using System;
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult;
@@ -109,7 +110,7 @@ public void no_events_are_return_if_event_stream_doesnt_exist() {
 		[Test]
 		public void read_all_events_forward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(2, records.Count);
 
 			Assert.AreEqual(_id1, records[0].Event.EventId);
@@ -118,7 +119,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() {
 		[Test]
 		public void read_all_events_backward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(2, records.Count);
 
 			Assert.AreEqual(_id1, records[1].Event.EventId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs
index a892fc61ee6..1c6d54ab6ec 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits.cs
@@ -1,6 +1,7 @@
 using System;
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult;
@@ -105,7 +106,7 @@ public void the_stream_can_be_read_for_second_stream_from_event_number() {
 		[Test]
 		public void read_all_events_forward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(3, records.Count);
 
 			Assert.AreEqual(_id1, records[0].Event.EventId);
@@ -115,7 +116,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() {
 		[Test]
 		public void read_all_events_backward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(3, records.Count);
 
 			Assert.AreEqual(_id1, records[2].Event.EventId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs
index 57c0c5a2a4a..de564315b07 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_events_with_version_numbers_greater_than_int_maxvalue.cs
@@ -1,6 +1,7 @@
 using System;
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult;
@@ -101,7 +102,7 @@ public void the_stream_can_be_read_for_second_stream_from_event_number() {
 		[Test]
 		public void read_all_events_forward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(3, records.Count);
 
 			Assert.AreEqual(_id1, records[0].Event.EventId);
@@ -111,7 +112,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() {
 		[Test]
 		public void read_all_events_backward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(3, records.Count);
 
 			Assert.AreEqual(_id1, records[2].Event.EventId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs
index cde4fa1ff00..83c823a79ad 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_and_commits_for_log_records_of_mixed_versions.cs
@@ -1,6 +1,7 @@
 using System;
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using EventStore.LogCommon;
 using NUnit.Framework;
@@ -98,7 +99,7 @@ public void the_stream_can_be_read_for_second_stream_from_event_number() {
 		[Test]
 		public void read_all_events_forward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records;
+			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records;
 			Assert.AreEqual(3, records.Count);
 
 			Assert.AreEqual(_id1, records[0].Event.EventId);
@@ -108,7 +109,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() {
 		[Test]
 		public void read_all_events_backward_returns_all_events_in_correct_order() {
-			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).Records;
+			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).Records;
 			Assert.AreEqual(3, records.Count);
 
 			Assert.AreEqual(_id1, records[2].Event.EventId);
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs
index d51e8fcebd3..8b16380b0ed 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_prepares_but_no_commits.cs
@@ -1,6 +1,7 @@
 using System;
 using EventStore.Core.Data;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using NUnit.Framework;
 
@@ -52,13 +53,13 @@ public void the_last_event_is_not_returned_for_stream() {
 		[Test]
 		public void read_all_events_forward_returns_no_events() {
-			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(0, records.Count);
 		}
 
 		[Test]
 		public void read_all_events_backward_returns_no_events() {
-			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords();
+			var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords();
 			Assert.AreEqual(0, records.Count);
 		}
 	}
diff --git a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs
index 3d0d0d8050d..2184a90bbfa 100644
--- a/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs
+++ b/src/EventStore.Core.Tests/Services/Storage/BuildingIndex/when_building_an_index_off_tfile_with_two_events_in_stream.cs
@@ -1,6 +1,7 @@
 using System;
 using EventStore.Core.Data;
 using
EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -100,7 +101,7 @@ public void the_stream_cant_be_read_for_second_stream() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.AreEqual(_id1, records[0].Event.EventId); @@ -109,7 +110,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(2, records.Count); Assert.AreEqual(_id1, records[1].Event.EventId); diff --git a/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs b/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs index 327f664a850..059312c3f0e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Chaser/with_storage_chaser_service.cs @@ -7,6 +7,7 @@ using EventStore.Core.Services.Storage; using EventStore.Core.Services.Storage.EpochManager; using EventStore.Core.Tests.Services.ElectionsService; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -39,7 +40,7 @@ public override async Task TestFixtureSetUp() { await base.TestFixtureSetUp(); Db = new TFChunkDb(CreateDbConfig()); Db.Open(); - Chaser = new TFChunkChaser(Db, _writerChk, _chaserChk, false); + Chaser = new TFChunkChaser(Db, _writerChk, _chaserChk, false, ITransactionFileTracker.NoOp); Chaser.Open(); Writer = new TFChunkWriter(Db); Writer.Open(); diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs index e6d19ee5e8b..583ceea5d6a 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream.cs @@ -21,10 +21,10 @@ protected override void WriteTestScenario() { public void should_change_expected_version_to_deleted_event_number_when_reading() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.That(chunkRecords.Any(x => diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs index 1216eaa3692..6cbdeee76d9 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_hard_deleting_stream_with_log_version_0.cs @@ -41,10 +41,10 @@ private void WriteV0HardDelete(string eventStreamId) { public void should_change_expected_version_to_deleted_event_number_when_reading() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.That(chunkRecords.Any(x => diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs index 7ab46d7121e..219550f8328 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_delete_prepare_but_no_commit_read_index_should.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -71,7 +72,7 @@ public void read_stream_events_backward_should_return_commited_records() { [Test] public void read_all_forward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -81,7 +82,7 @@ public void read_all_forward_should_return_all_stream_records_except_uncommited( [Test] public void read_all_backward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs index 121768c32ec..87a3088413c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/DeletingStream/when_writing_few_prepares_on_same_event_number_and_commiting_delete_on_this_version_read_index_should.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = 
EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -112,7 +113,7 @@ public void read_stream_events_backward_should_return_stream_deleted() { [Test] public void read_all_forward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(1, events.Length); @@ -121,7 +122,7 @@ public void read_all_forward_should_return_all_stream_records_except_uncommited( [Test] public void read_all_backward_should_return_all_stream_records_except_uncommited() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(1, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs index 63410d1c3d2..9e34d0e823d 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_TFLog_with_existing_epochs.cs @@ -21,6 +21,7 @@ using EventStore.Common.Utils; using Newtonsoft.Json.Linq; using EventStore.Core.LogV3; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -56,6 +57,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { @@ -260,10 +262,10 @@ public void can_add_epochs_to_cache() { Assert.AreEqual(2, epochsWritten.Length); for (int i = 0; i < epochsWritten.Length; i++) { _reader.Reposition(epochsWritten[i].Epoch.EpochPosition); - _reader.TryReadNext(); // read epoch + _reader.TryReadNext(ITransactionFileTracker.NoOp); // read epoch IPrepareLogRecord epochInfo; while (true) { - var result = _reader.TryReadNext(); + var result = _reader.TryReadNext(ITransactionFileTracker.NoOp); Assert.True(result.Success); if (result.LogRecord is IPrepareLogRecord prepare) { epochInfo = prepare; diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs index 65b04bc6a51..7dd7b05f560 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_having_an_epoch_manager_and_empty_tf_log.cs @@ -21,6 +21,7 @@ using EventStore.Common.Utils; using EventStore.Core.LogAbstraction; using EventStore.Core.LogV3; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -55,6 +56,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { @@ 
-159,10 +161,10 @@ public void can_write_epochs() { Assert.AreEqual(1 + 4 + 16, epochsWritten.Length); for (int i = 0; i < epochsWritten.Length; i++) { _reader.Reposition(epochsWritten[i].Epoch.EpochPosition); - _reader.TryReadNext(); // read epoch + _reader.TryReadNext(ITransactionFileTracker.NoOp); // read epoch IPrepareLogRecord epochInfo; while (true) { - var result = _reader.TryReadNext(); + var result = _reader.TryReadNext(ITransactionFileTracker.NoOp); Assert.True(result.Success); if (result.LogRecord is IPrepareLogRecord prepare) { epochInfo = prepare; diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs index 1f5e822932f..c9f25b1b2bc 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_existing_epochs.cs @@ -18,6 +18,7 @@ using EventStore.Core.TransactionLog.LogRecords; using System.Threading; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -54,6 +55,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { @@ -129,6 +131,7 @@ public void starting_epoch_manager_with_cache_larger_than_epoch_count_loads_all_ _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); _epochManager.Init(); _cache = GetCache(_epochManager); diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs index 029f3f72171..04ca05062af 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_starting_having_TFLog_with_no_epochs.cs @@ -18,6 +18,7 @@ using EventStore.Core.TransactionLog.LogRecords; using System.Threading; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage { [TestFixture(typeof(LogFormat.V2), typeof(string))] @@ -54,6 +55,7 @@ private EpochManager GetManager() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); } private LinkedList GetCache(EpochManager manager) { diff --git a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs index 05de65b2346..6b760e596d5 100644 --- a/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs +++ b/src/EventStore.Core.Tests/Services/Storage/EpochManager/when_truncating_the_epoch_checkpoint.cs @@ -8,6 +8,7 @@ using NUnit.Framework; using EventStore.Core.Services.Storage.EpochManager; using EventStore.Core.TransactionLog.LogRecords; +using 
EventStore.Core.TransactionLog; namespace EventStore.Core.Tests.Services.Storage.EpochManager { public abstract class @@ -61,6 +62,7 @@ public void SetUp() { _logFormat.CreatePartitionManager( reader: new TFChunkReader(_db, _db.Config.WriterCheckpoint), writer: _writer), + ITransactionFileTrackerFactory.NoOp, _instanceId); _epochManager.Init(); diff --git a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs index 1f043bfeb2b..d0dd38be63b 100644 --- a/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs +++ b/src/EventStore.Core.Tests/Services/Storage/FakeInMemoryTFReader.cs @@ -23,7 +23,7 @@ public void Reposition(long position) { _curPosition = position; } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { NumReads++; if (_records.ContainsKey(_curPosition)){ var pos = _curPosition; @@ -34,11 +34,11 @@ public SeqReadResult TryReadNext() { } } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { NumReads++; if (_records.ContainsKey(position)){ return new RecordReadResult(true, 0, _records[position], 0); @@ -47,7 +47,7 @@ public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { } } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return _records.ContainsKey(position); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs index cf973c48113..f272c45197f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/GetStreamLastEventNumber_NoCollisions.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.HashCollisions { @@ -35,7 +36,7 @@ public void with_no_events() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } @@ -50,7 +51,7 @@ public void with_one_event() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } @@ -79,7 +80,7 @@ public void with_multiple_events() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } [Test] @@ -88,31 +89,31 @@ public void with_multiple_events_and_before_position() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _third.LogPosition + 1)); + _third.LogPosition + 1, ITransactionFileTracker.NoOp)); Assert.AreEqual(2, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _third.LogPosition)); + _third.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(1, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _second.LogPosition)); + _second.LogPosition, ITransactionFileTracker.NoOp)); 
Assert.AreEqual(0, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _first.LogPosition)); + _first.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(ExpectedVersion.NoStream, ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - _zeroth.LogPosition)); + _zeroth.LogPosition, ITransactionFileTracker.NoOp)); } } @@ -131,7 +132,7 @@ public void with_deleted_stream() { ReadIndex.GetStreamLastEventNumber_NoCollisions( Hash, GetStreamId, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs index 582383ce553..188b72e5b61 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_hash_collisions.cs @@ -41,7 +41,7 @@ protected virtual void when() { public void Setup() { given(); _indexDir = PathName; - _fakeReader = new TFReaderLease(new FakeReader()); + _fakeReader = new TFReaderLease(new FakeReader(), ITransactionFileTracker.NoOp); _indexBackend = new FakeIndexBackend(_fakeReader); _logFormat = LogFormatHelper.LogFormatFactory.Create(new() { @@ -52,7 +52,7 @@ public void Setup() { _highHasher = _logFormat.HighHasher; _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, _logFormat.EmptyStreamId, () => new HashListMemTable(PTableVersions.IndexV1, maxSize: _maxMemTableSize), - () => _fakeReader, + _ => _fakeReader, PTableVersions.IndexV1, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: _maxMemTableSize, @@ -101,7 +101,7 @@ protected override void when() { [Test] public void should_return_no_stream() { - Assert.AreEqual(ExpectedVersion.NoStream, _indexReader.GetStreamLastEventNumber("account--696193173")); + Assert.AreEqual(ExpectedVersion.NoStream, _indexReader.GetStreamLastEventNumber("account--696193173", ITransactionFileTracker.NoOp)); } } @@ -138,7 +138,7 @@ protected override void when() { [Test] public void should_return_invalid_event_number() { Assert.AreEqual(EventStore.Core.Data.EventNumber.Invalid, - _indexReader.GetStreamLastEventNumber(stream1Id)); + _indexReader.GetStreamLastEventNumber(stream1Id, ITransactionFileTracker.NoOp)); } } @@ -174,7 +174,7 @@ protected override void when() { [Test] public void should_return_last_event_number() { - Assert.AreEqual(0, _indexReader.GetStreamLastEventNumber(stream1Id)); + Assert.AreEqual(0, _indexReader.GetStreamLastEventNumber(stream1Id, ITransactionFileTracker.NoOp)); } } @@ -209,7 +209,7 @@ protected override void when() { [Test] public void should_return_invalid_event_number() { Assert.AreEqual(EventStore.Core.Data.EventNumber.Invalid, - _indexReader.GetStreamLastEventNumber("account--696193173")); + _indexReader.GetStreamLastEventNumber("account--696193173", ITransactionFileTracker.NoOp)); } } @@ -311,7 +311,7 @@ protected override void when() { _tableIndex.Close(false); _tableIndex = new TableIndex(_indexDir, _lowHasher, _highHasher, "", () => new HashListMemTable(PTableVersions.IndexV2, maxSize: _maxMemTableSize), - () => _fakeReader, + _ => _fakeReader, PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: _maxMemTableSize, @@ -330,7 +330,7 @@ protected override void when() { [Test] public void should_return_the_correct_last_event_number() { - var result = _indexReader.GetStreamLastEventNumber(streamId); + var result = 
_indexReader.GetStreamLastEventNumber(streamId, ITransactionFileTracker.NoOp); Assert.AreEqual(2, result); } @@ -419,7 +419,7 @@ public FakeIndexBackend(TFReaderLease readerLease) { _readerLease = readerLease; } - public TFReaderLease BorrowReader() { + public TFReaderLease BorrowReader(ITransactionFileTracker tracker) { return _readerLease; } @@ -466,22 +466,22 @@ public void Reposition(long position) { throw new NotImplementedException(); } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { var record = (LogRecord)new PrepareLogRecord(position, Guid.NewGuid(), Guid.NewGuid(), 0, 0, position % 2 == 0 ? "account--696193173" : "LPN-FC002_LPK51001", null, -1, DateTime.UtcNow, PrepareFlags.None, "type", null, new byte[0], null); return new RecordReadResult(true, position + 1, record, 1); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs index 97e424d7d53..f974cc555af 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_single_deleted_event_stream_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -111,7 +112,7 @@ public void not_find_record_with_non_existing_version_for_event_stream_with_same [Test] public void return_all_events_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -121,7 +122,7 @@ public void return_all_events_on_read_all_forward() { [Test] public void return_all_events_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs index 6ff57263487..de85cfde5ae 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_one_event_each_with_second_stream_deleted_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -195,7 +196,7 @@ public void return_empty_range_on_from_end_range_query_for_non_existing_stream_w [Test] public void return_all_events_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(4, events.Length); @@ -207,7 +208,7 @@ public void return_all_events_on_read_all_forward() { [Test] public void return_all_events_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(4, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs index c348fb8ace0..a3b7ed4f712 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_three_collisioned_streams_with_different_number_of_events_third_one_deleted_each_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -556,7 +557,7 @@ public void [Test] public void return_all_prepares_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3 + 5 + 7 + 1, events.Length); @@ -584,7 +585,7 @@ public void return_all_prepares_on_read_all_forward() { [Test] public void return_all_prepares_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3 + 5 + 7 + 1, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs index 034505689b4..62eb3331dc3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs +++ 
b/src/EventStore.Core.Tests/Services/Storage/HashCollisions/with_two_collisioned_streams_one_event_each_first_stream_deleted_read_index_should.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -108,7 +109,7 @@ public void not_return_range_for_non_existing_stream_with_same_hash() { [Test] public void return_all_events_on_read_all_forward() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); @@ -119,7 +120,7 @@ public void return_all_events_on_read_all_forward() { [Test] public void return_all_events_on_read_all_backward() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs index 27aa32ac76c..998a506240d 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -70,7 +71,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r5, records[1].Event); @@ -79,7 +80,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs index 6eab5b6d722..688522572d4 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -75,7 +76,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(4, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r4, records[1].Event); @@ -85,7 +86,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(4, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs index 1ebf7267679..7dc6f389ee8 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxage_specified.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -70,7 +71,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r5, records[1].Event); @@ -79,7 +80,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(3, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs index b8076e4df44..4e889dcfbdb 100644 
--- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_maxcount_specified.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -73,7 +74,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r3, records[1].Event); @@ -84,7 +85,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs index 42d5157e1e7..df0582d9207 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/AfterScavenge/when_having_stream_with_truncatebefore_specified.cs @@ -1,4 +1,5 @@ using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -72,7 +73,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r3, records[1].Event); @@ -83,7 +84,7 @@ public void read_all_forward_doesnt_return_expired_records() { [Test] public void read_all_backward_doesnt_return_expired_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(5, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs index 433278ff472..ee0f8afac86 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_one_stream_with_maxage_and_other_stream_with_maxcount_and_streams_have_same_hash.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -148,7 +149,7 @@ public void backward_range_read_doesnt_return_expired_records_for_stream_2() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(12, records.Count); Assert.AreEqual(_r11, records[0].Event); Assert.AreEqual(_r21, records[1].Event); @@ -171,7 +172,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(12, records.Count); Assert.AreEqual(_r11, records[11].Event); Assert.AreEqual(_r21, records[10].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs index 35f17806393..7961c9d6446 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxage_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -71,7 +72,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -83,7 +84,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git 
a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs index 1761bf61d05..7f644a8f4ec 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_both_maxage_and_maxcount_specified_with_maxcount_more_strict.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -73,7 +74,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -85,7 +86,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs index 8de7a90b8ad..276a6dc09fb 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxage_specified.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -70,7 +71,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -82,7 +83,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); 
Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs index baf5a226725..5f570979256 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_maxcount_specified.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -73,7 +74,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -85,7 +86,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs index 488ff2e3907..566e1f20f07 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/when_having_stream_with_truncatebefore_specified.cs @@ -1,4 +1,5 @@ using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -69,7 +70,7 @@ public void backward_range_read_doesnt_return_expired_records() { [Test] public void read_all_forward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -81,7 +82,7 @@ public void read_all_forward_returns_all_records_including_expired_ones() { [Test] public void read_all_backward_returns_all_records_including_expired_ones() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs 
b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs index eb47606178f..9d4feac9615 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_age.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -85,7 +86,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -97,7 +98,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs index 9a2ddc5e54c..a95864ffca8 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_max_count.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -85,7 +86,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -97,7 +98,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs index 0c6e7394fc4..dc411a09c05 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_big_start_from.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services; +using 
EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -74,7 +75,7 @@ public void backward_range_read_returns_no_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -86,7 +87,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs index 628bf1a5530..dfe5dfd1184 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_age_and_normal_max_count.cs @@ -1,5 +1,6 @@ using System; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void on_backward_range_read_metadata_is_ignored() { [Test] public void on_read_all_forward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); @@ -102,7 +103,7 @@ public void on_read_all_forward_metadata_is_ignored() { [Test] public void on_read_all_backward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs index c0958dbe544..3c68b1f9a7e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_max_count_and_normal_max_age.cs @@ -1,5 +1,6 @@ using System; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void on_backward_range_read_metadata_is_ignored() { [Test] public void on_read_all_forward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).Records; if 
(LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); @@ -102,7 +103,7 @@ public void on_read_all_forward_metadata_is_ignored() { [Test] public void on_read_all_backward_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).Records; if (LogFormatHelper.IsV2) { Assert.AreEqual(6, records.Count); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs index 421e66f6257..bf64c55957f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_invalid_metadata.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void on_backward_range_read_all_metadata_is_ignored() { [Test] public void on_read_all_forward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -89,7 +90,7 @@ public void on_read_all_forward_all_metadata_is_ignored() { [Test] public void on_read_all_backward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs index 2d31673f5b0..2e866266fab 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -89,7 +90,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); 
Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs index 70285f32166..f5788604149 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_age_and_normal_max_count.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -78,7 +79,7 @@ public void on_backward_range_read_all_metadata_is_ignored() { [Test] public void on_read_all_forward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -90,7 +91,7 @@ public void on_read_all_forward_all_metadata_is_ignored() { [Test] public void on_read_all_backward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs index 047d5b1cec1..f0b257cb160 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -89,7 +90,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs index 
edda0143129..e4326d9e8a1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_max_count_and_normal_max_age.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -80,7 +81,7 @@ public void on_backward_range_read_all_metadata_is_ignored() { [Test] public void on_read_all_forward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -92,7 +93,7 @@ public void on_read_all_forward_all_metadata_is_ignored() { [Test] public void on_read_all_backward_all_metadata_is_ignored() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs index a7932690e74..fc22106d9f4 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_too_big_start_from.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -77,7 +78,7 @@ public void backward_range_read_returns_all_records() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -89,7 +90,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs index 7c154d78365..eb71ad17d47 100644 --- a/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs +++ b/src/EventStore.Core.Tests/Services/Storage/MaxAgeMaxCount/with_truncatebefore_greater_than_int_maxvalue.cs @@ -1,6 +1,7 @@ using System; using 
EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -86,7 +87,7 @@ public void backward_range_read_returns_records_after_truncate_before() { [Test] public void read_all_forward_returns_all_records() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r1, records[0].Event); Assert.AreEqual(_r2, records[1].Event); @@ -98,7 +99,7 @@ public void read_all_forward_returns_all_records() { [Test] public void read_all_backward_returns_all_records() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords(); + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords(); Assert.AreEqual(6, records.Count); Assert.AreEqual(_r6, records[0].Event); Assert.AreEqual(_r5, records[1].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs b/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs index c4571ab0c1c..67c2976f08f 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Metastreams/read_index_result_original_stream_exists_tests.cs @@ -2,6 +2,7 @@ using EventStore.Core.Services; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -25,7 +26,7 @@ protected override DbResult CreateDb(TFChunkDbCreationHelper : IIndexReader { public long NotCachedStreamInfo { get; } public long HashCollisions { get; } - public IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber) { + public IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadStreamResult - ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { + ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, - int maxCount) { + int maxCount, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + 
long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -44,36 +45,36 @@ public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func<ulong, TStreamId> getStreamId, long fromEventNumber, - int maxCount, long beforePosition) { + int maxCount, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IPrepareLogRecord<TStreamId> ReadPrepare(TStreamId streamId, long eventNumber) { + public IPrepareLogRecord<TStreamId> ReadPrepare(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StreamMetadata GetStreamMetadata(TStreamId streamId) { + public StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber(TStreamId streamId) { + public long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func<ulong, TStreamId> getStreamId, long beforePosition) { + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func<ulong, TStreamId> getStreamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions.cs index a2ba1b877d6..c1babfe63f1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -34,7 +35,7 @@ public void with_no_events() { Assert.AreEqual(ExpectedVersion.NoStream, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } @@ -49,12 +50,12 @@ public void with_one_event() { Assert.AreEqual(2, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); Assert.AreEqual(3, ReadIndex.GetStreamLastEventNumber_KnownCollisions( CollidingStream, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } @@ -83,17 +84,17 @@ public void with_multiple_events() { Assert.AreEqual(3, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - long.MaxValue)); + long.MaxValue,
ITransactionFileTracker.NoOp)); Assert.AreEqual(2, ReadIndex.GetStreamLastEventNumber_KnownCollisions( CollidingStream, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); Assert.AreEqual(0, ReadIndex.GetStreamLastEventNumber_KnownCollisions( CollidingStream1, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } [Test] @@ -101,27 +102,27 @@ public void with_multiple_events_and_before_position() { Assert.AreEqual(3, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - _third.LogPosition + 1)); + _third.LogPosition + 1, ITransactionFileTracker.NoOp)); Assert.AreEqual(2, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - _third.LogPosition)); + _third.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(1, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - _second.LogPosition)); + _second.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(0, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - _first.LogPosition)); + _first.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(ExpectedVersion.NoStream, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - _zeroth.LogPosition)); + _zeroth.LogPosition, ITransactionFileTracker.NoOp)); } } @@ -139,12 +140,12 @@ public void with_deleted_stream() { Assert.AreEqual(EventNumber.DeletedStream, ReadIndex.GetStreamLastEventNumber_KnownCollisions( Stream, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); Assert.AreEqual(1, ReadIndex.GetStreamLastEventNumber_KnownCollisions( CollidingStream, - long.MaxValue)); + long.MaxValue, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions_Randomized.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions_Randomized.cs index b4bd909804c..e7f6944d5e0 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_KnownCollisions_Randomized.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using EventStore.Core.Data; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -47,10 +48,10 @@ public void returns_correct_last_event_number_before_position() { foreach (var @event in _events) { Assert.AreEqual(streamLast, - ReadIndex.GetStreamLastEventNumber_KnownCollisions(Stream, @event.LogPosition)); + ReadIndex.GetStreamLastEventNumber_KnownCollisions(Stream, @event.LogPosition, ITransactionFileTracker.NoOp)); Assert.AreEqual(collidingStreamLast, - ReadIndex.GetStreamLastEventNumber_KnownCollisions(CollidingStream, @event.LogPosition)); + ReadIndex.GetStreamLastEventNumber_KnownCollisions(CollidingStream, @event.LogPosition, ITransactionFileTracker.NoOp)); switch (@event.EventStreamId) { diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_NoCollisions_Randomized.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_NoCollisions_Randomized.cs index 12e3a7d4622..4aa23cd55a9 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_NoCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/GetStreamLastEventNumber_NoCollisions_Randomized.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; 
using EventStore.Core.Data; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -48,7 +49,7 @@ public void returns_correct_last_event_number_before_position() { foreach (var @event in _events) { Assert.AreEqual(expectedLastEventNumber, - ReadIndex.GetStreamLastEventNumber_NoCollisions(Hash, GetStreamId, @event.LogPosition)); + ReadIndex.GetStreamLastEventNumber_NoCollisions(Hash, GetStreamId, @event.LogPosition, ITransactionFileTracker.NoOp)); if (@event.EventStreamId == Stream) expectedLastEventNumber = @event.EventNumber; diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions.cs index 00fbde3972f..986e4b39dd7 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -48,7 +49,7 @@ public void with_no_events() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -57,7 +58,7 @@ public void with_no_events() { Stream, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -79,7 +80,7 @@ public void with_one_event() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -89,7 +90,7 @@ public void with_one_event() { Stream, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new[] { _event }, result); Assert.True(result.IsEndOfStream); @@ -98,7 +99,7 @@ public void with_one_event() { Stream, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -132,7 +133,7 @@ public void with_multiple_events() { Stream, fromEventNumber, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -146,7 +147,7 @@ public void with_multiple_events_and_max_count() { Stream, fromEventNumber, 2, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).Skip(fromEventNumber + 1 - 2).ToArray(), result); if (fromEventNumber - 2 < 0) @@ -163,7 +164,7 @@ public void with_multiple_events_and_before_position() { Stream, fromEventNumber, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -172,7 +173,7 @@ public void with_multiple_events_and_before_position() { Stream, -1, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, 
ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -198,7 +199,7 @@ public void can_read_events() { Stream, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -210,7 +211,7 @@ public void can_read_tombstone_event() { Stream, EventNumber.DeletedStream, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -220,7 +221,7 @@ public void can_read_tombstone_event() { Stream, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -230,7 +231,7 @@ public void can_read_tombstone_event() { Stream, EventNumber.DeletedStream - 1, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(1, result.NextEventNumber); @@ -262,7 +263,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 7, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -271,7 +272,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 7, 4, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).ToArray(), result); Assert.AreEqual(3, result.NextEventNumber); @@ -280,7 +281,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 3, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(2, result.NextEventNumber); // from colliding stream, but doesn't matter much @@ -289,7 +290,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 2, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(1, result.NextEventNumber); // from colliding stream, but doesn't matter much @@ -324,7 +325,7 @@ public void result_is_deduplicated_keeping_oldest_duplicates() { Stream, 3, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult( _events diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions_Randomized.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions_Randomized.cs index bb0faa8feb9..16b7d1df045 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_KnownCollisions_Randomized.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -59,23 +60,23 @@ public void returns_correct_events_before_position() { IndexReadEventInfoResult result; if (@event.EventStreamId == Stream) { result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, - @event.EventNumber - 1, 
int.MaxValue, @event.LogPosition); + @event.EventNumber - 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(),result); Assert.True(result.IsEndOfStream); // events >= @event.EventNumber should be filtered out result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, - @event.EventNumber, int.MaxValue, @event.LogPosition); + @event.EventNumber, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, - @event.EventNumber + 1, int.MaxValue, @event.LogPosition); + @event.EventNumber + 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); } - result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, -1, int.MaxValue, @event.LogPosition); + result = ReadIndex.ReadEventInfoBackward_KnownCollisions(Stream, -1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -99,7 +100,7 @@ public void returns_correct_events_with_max_count() { Assert.GreaterOrEqual(fromEventNumber, 0); var result = ReadIndex.ReadEventInfoBackward_KnownCollisions( - Stream, fromEventNumber, maxCount, long.MaxValue); + Stream, fromEventNumber, maxCount, long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(curEvents.Skip(curEvents.Count - maxCount).ToArray(), result); if (fromEventNumber - maxCount < 0) diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs index 4b397a0866f..57c037072d6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -48,7 +49,7 @@ public void with_no_events() { GetStreamId, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -58,7 +59,7 @@ public void with_no_events() { GetStreamId, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -79,7 +80,7 @@ public void with_one_event() { GetStreamId, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -90,7 +91,7 @@ public void with_one_event() { GetStreamId, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new[] { _event }, result); Assert.True(result.IsEndOfStream); @@ -100,7 +101,7 @@ public void with_one_event() { GetStreamId, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -135,7 +136,7 @@ public void with_multiple_events() { GetStreamId, fromEventNumber, int.MaxValue, - long.MaxValue); + long.MaxValue, 
ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -150,7 +151,7 @@ public void with_multiple_events_and_max_count() { GetStreamId, fromEventNumber, 2, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).Skip(fromEventNumber + 1 - 2).ToArray(), result); if (fromEventNumber - 2 < 0) @@ -168,7 +169,7 @@ public void with_multiple_events_and_before_position() { GetStreamId, fromEventNumber, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -178,7 +179,7 @@ public void with_multiple_events_and_before_position() { GetStreamId, -1, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Take(fromEventNumber + 1).ToArray(), result); Assert.True(result.IsEndOfStream); @@ -204,7 +205,7 @@ public void can_read_events() { GetStreamId, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -217,7 +218,7 @@ public void can_read_tombstone_event() { GetStreamId, EventNumber.DeletedStream, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -228,7 +229,7 @@ public void can_read_tombstone_event() { GetStreamId, -1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -239,7 +240,7 @@ public void can_read_tombstone_event() { GetStreamId, EventNumber.DeletedStream - 1, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(1, result.NextEventNumber); @@ -272,7 +273,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event GetStreamId, 7, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -282,7 +283,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event GetStreamId, 7, 4, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).ToArray(), result); Assert.AreEqual(3, result.NextEventNumber); @@ -293,7 +294,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event GetStreamId, 3, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(0, result.NextEventNumber); @@ -328,7 +329,7 @@ public void result_is_deduplicated_keeping_oldest_duplicates() { GetStreamId, 3, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult( _events diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs index 29e2e48fe3d..b0dca6c8ab2 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoBackward_NoCollisions_Randomized.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -62,24 +63,24 @@ public void returns_correct_events_before_position() { IndexReadEventInfoResult result; if (@event.EventStreamId == Stream) { result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, - @event.EventNumber - 1, int.MaxValue, @event.LogPosition); + @event.EventNumber - 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); // events >= @event.EventNumber should be filtered out result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, - @event.EventNumber, int.MaxValue, @event.LogPosition); + @event.EventNumber, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, - @event.EventNumber + 1, int.MaxValue, @event.LogPosition); + @event.EventNumber + 1, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); } result = ReadIndex.ReadEventInfoBackward_NoCollisions(Hash, GetStreamId, -1, int.MaxValue, - @event.LogPosition); + @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); Assert.True(result.IsEndOfStream); @@ -104,7 +105,7 @@ public void returns_correct_events_with_max_count() { var result = ReadIndex.ReadEventInfoBackward_NoCollisions( - Hash, GetStreamId, fromEventNumber, maxCount, long.MaxValue); + Hash, GetStreamId, fromEventNumber, maxCount, long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(curEvents.Skip(curEvents.Count - maxCount).ToArray(), result); if (fromEventNumber - maxCount < 0) diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs index 1ec7577bcdd..171235f2ef2 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -47,7 +48,7 @@ public void with_no_events() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -68,7 +69,7 @@ public void with_one_event() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _event }, result); @@ -78,7 +79,7 @@ public void with_one_event() { Stream, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new EventRecord[] { }, result); 
Assert.True(result.IsEndOfStream); @@ -87,7 +88,7 @@ public void with_one_event() { CollidingStream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); CheckResult(new[] { _collidingEvent }, result); @@ -97,7 +98,7 @@ public void with_one_event() { CollidingStream, 1, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(new EventRecord[] { }, result); Assert.True(result.IsEndOfStream); @@ -130,7 +131,7 @@ public void with_multiple_events() { Stream, fromEventNumber, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(fromEventNumber).ToArray(), result); if (fromEventNumber > 3) @@ -147,7 +148,7 @@ public void with_multiple_events_and_max_count() { Stream, fromEventNumber, 2, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(fromEventNumber).Take(2).ToArray(), result); if (fromEventNumber > 3) @@ -164,7 +165,7 @@ public void with_multiple_events_and_before_position() { Stream, fromEventNumber, int.MaxValue, - _events[fromEventNumber + 1].LogPosition); + _events[fromEventNumber + 1].LogPosition, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(fromEventNumber).Take(1).ToArray(), result); Assert.AreEqual((long) fromEventNumber + int.MaxValue, result.NextEventNumber); @@ -190,7 +191,7 @@ public void can_read_events_and_tombstone_event_not_returned() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.AreEqual(int.MaxValue, result.NextEventNumber); @@ -202,7 +203,7 @@ public void next_event_number_set_correctly() { Stream, 2, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.NextEventNumber); @@ -214,7 +215,7 @@ public void can_read_tombstone_event() { Stream, EventNumber.DeletedStream, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(1, result.EventInfos.Length); Assert.AreEqual(EventNumber.DeletedStream, result.EventInfos[0].EventNumber); @@ -248,7 +249,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.ToArray(), result); Assert.AreEqual(int.MaxValue, result.NextEventNumber); @@ -257,7 +258,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 0, 3, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Take(1).ToArray(), result); Assert.AreEqual(3, result.NextEventNumber); @@ -266,7 +267,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 3, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).ToArray(), result); Assert.AreEqual((long ) 3 + int.MaxValue, result.NextEventNumber); @@ -275,7 +276,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 4, 3, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(_events.Skip(1).Take(2).ToArray(), result); Assert.AreEqual(7, result.NextEventNumber); @@ -284,7 +285,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 7, 3, - long.MaxValue); + long.MaxValue, 
ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(11, result.NextEventNumber); @@ -293,7 +294,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 12, 1, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.AreEqual(15, result.NextEventNumber); // from colliding stream, but doesn't matter much @@ -302,7 +303,7 @@ public void strictly_returns_up_to_max_count_consecutive_events_from_start_event Stream, 12, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); Assert.AreEqual(0, result.EventInfos.Length); Assert.True(result.IsEndOfStream); @@ -336,7 +337,7 @@ public void result_is_deduplicated_keeping_oldest_duplicates() { Stream, 0, int.MaxValue, - long.MaxValue); + long.MaxValue, ITransactionFileTracker.NoOp); CheckResult( _events diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs index ba6c57ec2df..2dcec589ef4 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfoForward_KnownCollisions_Randomized.cs @@ -4,6 +4,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -54,7 +55,7 @@ public void returns_correct_events_before_position() { var curEvents = new List<EventRecord>(); foreach (var @event in _events) { var result = - ReadIndex.ReadEventInfoForward_KnownCollisions(Stream, 0, int.MaxValue, @event.LogPosition); + ReadIndex.ReadEventInfoForward_KnownCollisions(Stream, 0, int.MaxValue, @event.LogPosition, ITransactionFileTracker.NoOp); CheckResult(curEvents.ToArray(), result); if (curEvents.Count == 0) Assert.True(result.IsEndOfStream); @@ -81,7 +82,7 @@ public void returns_correct_events_with_max_count() { Assert.GreaterOrEqual(fromEventNumber, 0); var result = ReadIndex.ReadEventInfoForward_KnownCollisions( - Stream, fromEventNumber, maxCount, long.MaxValue); + Stream, fromEventNumber, maxCount, long.MaxValue, ITransactionFileTracker.NoOp); CheckResult(curEvents.Skip(curEvents.Count - maxCount).ToArray(), result); Assert.AreEqual(@event.EventNumber + 1, result.NextEventNumber); } diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs index 38ca0037d14..9ff0f9d9d3b 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndex/ReadEventInfo_KeepDuplicates.cs @@ -3,6 +3,7 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; using EventStore.Core.Tests.Index.Hashers; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.ReadIndex { @@ -51,7 +52,7 @@ protected override void WriteTestScenario() { [Test] public void returns_correct_info_for_normal_event() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 1); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 1, ITransactionFileTracker.NoOp); var events =
_events .Where(x => x.EventStreamId == Stream && x.EventNumber == 1) .ToArray(); @@ -64,7 +65,7 @@ public void returns_correct_info_for_normal_event() { [Test] public void returns_correct_info_for_duplicate_events() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 2); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 2, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 2) .ToArray(); @@ -77,7 +78,7 @@ public void returns_correct_info_for_duplicate_events() { [Test] public void returns_correct_info_for_colliding_stream() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 3); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 3, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 3) .ToArray(); @@ -87,7 +88,7 @@ public void returns_correct_info_for_colliding_stream() { Assert.AreEqual(true, result.IsEndOfStream); CheckResult(events, result); - result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 3); + result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 3, ITransactionFileTracker.NoOp); events = _events .Where(x => x.EventStreamId == CollidingStream && x.EventNumber == 3) .ToArray(); @@ -100,7 +101,7 @@ public void returns_correct_info_for_colliding_stream() { [Test] public void returns_correct_info_for_soft_deleted_stream() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(SoftDeletedStream, 10); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(SoftDeletedStream, 10, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == SoftDeletedStream && x.EventNumber == 10) .ToArray(); @@ -113,7 +114,7 @@ public void returns_correct_info_for_soft_deleted_stream() { [Test] public void returns_correct_info_for_hard_deleted_stream() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(HardDeletedStream, 20); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(HardDeletedStream, 20, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == HardDeletedStream && x.EventNumber == 20) .ToArray(); @@ -126,7 +127,7 @@ public void returns_correct_info_for_hard_deleted_stream() { [Test] public void returns_empty_info_when_event_does_not_exist() { - var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 6); + var result = ReadIndex.ReadEventInfo_KeepDuplicates(Stream, 6, ITransactionFileTracker.NoOp); var events = _events .Where(x => x.EventStreamId == Stream && x.EventNumber == 6) .ToArray(); @@ -135,7 +136,7 @@ public void returns_empty_info_when_event_does_not_exist() { Assert.AreEqual(-1, result.NextEventNumber); Assert.AreEqual(true, result.IsEndOfStream); - result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 4); + result = ReadIndex.ReadEventInfo_KeepDuplicates(CollidingStream, 4, ITransactionFileTracker.NoOp); events = _events .Where(x => x.EventStreamId == CollidingStream && x.EventNumber == 4) .ToArray(); diff --git a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs index ca3e2a05064..cd4dc9f7e7c 100644 --- a/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/ReadIndexTestScenario.cs @@ -111,7 +111,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = TransformTableIndex(new TableIndex(indexDirectory, LowHasher, 
HighHasher, emptyStreamId, () => new HashListMemTable(IndexBitnessVersion, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), IndexBitnessVersion, int.MaxValue, Constants.PTableMaxReaderCountDefault, @@ -140,6 +140,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(ChaserCheckpoint.Read()); @@ -152,7 +153,8 @@ public override async Task TestFixtureSetUp() { if (_scavenge) { if (_completeLastChunkOnScavenge) Db.Manager.GetChunk(Db.Manager.ChunksCount - 1).Complete(); - _scavenger = new TFChunkScavenger(Serilog.Log.Logger, Db, new FakeTFScavengerLog(), TableIndex, ReadIndex, _logFormat.Metastreams); + _scavenger = new TFChunkScavenger(Serilog.Log.Logger, Db, new FakeTFScavengerLog(), TableIndex, ReadIndex, _logFormat.Metastreams, + ITransactionFileTracker.NoOp); await _scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: _mergeChunks, scavengeIndex: _scavengeIndex); } diff --git a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs index a348a4089bf..5cb3cd70411 100644 --- a/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/RepeatableDbTestScenario.cs @@ -64,7 +64,7 @@ public void CreateDb(params Rec[] records) { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV3, int.MaxValue, Constants.PTableMaxReaderCountDefault, @@ -93,6 +93,7 @@ public void CreateDb(params Rec[] records) { indexCheckpoint: DbRes.Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read()); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs index 80d6aa37ad4..8e52afcc4a3 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_duplicate_events.cs @@ -1,5 +1,6 @@ using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.Scavenge { @@ -52,7 +53,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_events_forward_does_not_return_duplicate() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(11, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs 
b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs index 8e2e68ed4aa..1b89c0ad9e7 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_2nd_chunk__in_db_with_3_chunks.cs @@ -1,6 +1,7 @@ using System; using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -28,7 +29,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_forward_does_not_return_scavenged_deleted_stream_events_and_return_remaining() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -38,7 +39,7 @@ public void read_all_forward_does_not_return_scavenged_deleted_stream_events_and [Test] public void read_all_backward_does_not_return_scavenged_deleted_stream_events_and_return_remaining() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -49,7 +50,7 @@ public void read_all_backward_does_not_return_scavenged_deleted_stream_events_an [Test] public void read_all_backward_from_beginning_of_second_chunk_returns_no_records() { var pos = new TFPos(10000, 10000); - var events = ReadIndex.ReadAllEventsBackward(pos, 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(pos, 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(0, events.Length); @@ -58,7 +59,7 @@ public void read_all_backward_from_beginning_of_second_chunk_returns_no_records( [Test] public void read_all_forward_from_beginning_of_2nd_chunk_with_max_2_record_returns_delete_record_and_record_from_3rd_chunk() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100, ITransactionFileTracker.NoOp).EventRecords() .Take(2) .Select(r => r.Event) .ToArray(); @@ -69,7 +70,7 @@ public void [Test] public void read_all_forward_with_max_5_records_returns_2_records_from_2nd_chunk() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -93,7 +94,7 @@ public void last_physical_record_from_scavenged_stream_should_remain() { var chunk = Db.Manager.GetChunk(1); var chunkPos = (int)(_event7.LogPosition % Db.Config.ChunkSize); - var res = chunk.TryReadAt(chunkPos, couldBeScavenged: false); + var res = chunk.TryReadAt(chunkPos, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs 
b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs index 703d784f407..4682018b660 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs @@ -1,5 +1,6 @@ using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.Scavenge { @@ -24,7 +25,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_forward_returns_events_only_from_uncompleted_chunk_and_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); @@ -35,7 +36,7 @@ public void read_all_forward_returns_events_only_from_uncompleted_chunk_and_dele [Test] public void read_all_backward_returns_events_only_from_uncompleted_chunk_and_delete_record() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); @@ -47,7 +48,7 @@ public void read_all_backward_returns_events_only_from_uncompleted_chunk_and_del [Test] public void read_all_backward_from_beginning_of_second_chunk_returns_no_records() { var pos = new TFPos(10000, 10000); - var events = ReadIndex.ReadAllEventsBackward(pos, 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(pos, 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(0, events.Length); @@ -55,7 +56,7 @@ public void read_all_backward_from_beginning_of_second_chunk_returns_no_records( [Test] public void read_all_forward_from_beginning_of_second_chunk_with_max_1_record_returns_5th_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 1).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 1, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(1, events.Length); @@ -64,7 +65,7 @@ public void read_all_forward_from_beginning_of_second_chunk_with_max_1_record_re [Test] public void read_all_forward_with_max_5_records_returns_3_records_from_second_chunk_and_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(3, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs index 45a0481513b..2706cc23728 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_deleting_single_stream_spanning_through_2_chunks_in_db_with_3_chunks.cs @@ -1,5 +1,6 @@ 
using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Storage.Scavenge { @@ -26,7 +27,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_forward_does_not_return_scavenged_deleted_stream_events_and_return_remaining_plus_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -37,7 +38,7 @@ public void [Test] public void read_all_backward_does_not_return_scavenged_deleted_stream_events_and_return_remaining_plus_delete_record() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -48,7 +49,7 @@ public void [Test] public void read_all_backward_from_beginning_of_second_chunk_returns_no_records() { var pos = new TFPos(10000, 10000); - var events = ReadIndex.ReadAllEventsBackward(pos, 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(pos, 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(0, events.Length); @@ -56,7 +57,7 @@ public void read_all_backward_from_beginning_of_second_chunk_returns_no_records( [Test] public void read_all_forward_from_beginning_of_2nd_chunk_with_max_1_record_returns_1st_record_from_3rd_chunk() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(10000, 10000), 100, ITransactionFileTracker.NoOp).EventRecords() .Take(1) .Select(r => r.Event) .ToArray(); @@ -66,7 +67,7 @@ public void read_all_forward_from_beginning_of_2nd_chunk_with_max_1_record_retur [Test] public void read_all_forward_with_max_5_records_returns_2_records_from_2nd_chunk_plus_delete_record() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 5, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs index cda7dce1e1c..c5f115725c1 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_having_commit_spanning_multiple_chunks.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -60,12 +61,12 @@ protected override void WriteTestScenario() { public void all_chunks_are_merged_and_scavenged() { foreach (var rec in _scavenged) { var chunk = Db.Manager.GetChunkFor(rec.LogPosition); - Assert.IsFalse(chunk.TryReadAt(rec.LogPosition, couldBeScavenged: true).Success); + Assert.IsFalse(chunk.TryReadAt(rec.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } foreach (var rec in _survivors) { var chunk = Db.Manager.GetChunkFor(rec.LogPosition); - var res = 
chunk.TryReadAt(rec.LogPosition, couldBeScavenged: false); + var res = chunk.TryReadAt(rec.LogPosition, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(rec, res.LogRecord); } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs index 0353829a9df..5f1adc0e9ef 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_deleted_records.cs @@ -38,7 +38,7 @@ protected override void WriteTestScenario() { [Test] public void should_be_able_to_read_the_all_stream() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(5, events.Count()); @@ -53,10 +53,10 @@ public void should_be_able_to_read_the_all_stream() { public void should_have_updated_deleted_stream_event_number() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } var id = _logFormat.StreamIds.LookupValue(_deletedEventStreamId); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs index 09e0a1919c3..2b99561d423 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_transactions.cs @@ -58,10 +58,10 @@ public void the_log_records_are_in_first_chunk() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.AreEqual(7, chunkRecords.Count); @@ -72,10 +72,10 @@ public void the_log_records_are_unchanged_in_second_chunk() { var chunk = Db.Manager.GetChunk(1); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.AreEqual(2, chunkRecords.Count); @@ -198,7 +198,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, 
ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_p2.EventId, records[0].Event.EventId); Assert.AreEqual(_p4.EventId, records[1].Event.EventId); @@ -211,7 +211,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_random1.EventId, records[0].Event.EventId); @@ -225,7 +225,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_no_transaction_records_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(1, records.Count); Assert.AreEqual(_random1.EventId, records[0].Event.EventId); } @@ -233,14 +233,14 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), - 10); // end of first commit + 10, ITransactionFileTracker.NoOp); // end of first commit Assert.AreEqual(5, res1.Records.Count); Assert.AreEqual(_p4.EventId, res1.Records[0].Event.EventId); Assert.AreEqual(_p1.EventId, res1.Records[1].Event.EventId); @@ -248,7 +248,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p5.EventId, res1.Records[3].Event.EventId); Assert.AreEqual(_random1.EventId, res1.Records[4].Event.EventId); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2.EventId, res2.Records[0].Event.EventId); } @@ -256,7 +256,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_postCommitPos, _p4.LogPosition); // p3 post position - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3.EventId, res1.Records[0].Event.EventId); @@ -264,7 +264,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4.EventId, res1.Records[2].Event.EventId); Assert.AreEqual(_p2.EventId, res1.Records[3].Event.EventId); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(2, res2.Records.Count); Assert.AreEqual(_p5.EventId, res2.Records[0].Event.EventId); } @@ 
-276,7 +276,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -293,7 +293,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -310,14 +310,14 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; @@ -338,14 +338,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs index 6d2428771a1..4a98492f652 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_and_deleted_records.cs @@ -46,7 +46,7 @@ protected override void WriteTestScenario() { [Test] public void 
should_be_able_to_read_the_all_stream() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).Records.Select(r => r.Event).ToArray(); + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).Records.Select(r => r.Event).ToArray(); Assert.AreEqual(5, events.Count()); Assert.AreEqual(_event1.EventId, events[0].EventId); Assert.AreEqual(_event2.EventId, events[1].EventId); @@ -59,10 +59,10 @@ public void should_be_able_to_read_the_all_stream() { public void should_have_updated_deleted_stream_event_number() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } var deletedRecord = (PrepareLogRecord)chunkRecords.First(x => x.RecordType == LogRecordType.Prepare @@ -77,10 +77,10 @@ public void should_have_updated_deleted_stream_event_number() { public void the_log_records_are_still_version_0() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.IsTrue(chunkRecords.All(x => x.Version == LogRecordVersion.LogRecordV0)); diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs index 04fde8383dd..df98d75de92 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_scavenging_tfchunk_with_version0_log_records_using_transactions.cs @@ -105,10 +105,10 @@ public void the_log_records_are_still_version_0_in_first_chunk() { var chunk = Db.Manager.GetChunk(0); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.IsTrue(chunkRecords.All(x => x.Version == LogRecordVersion.LogRecordV0)); @@ -120,10 +120,10 @@ public void the_log_records_are_unchanged_in_second_chunk() { var chunk = Db.Manager.GetChunk(1); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, ITransactionFileTracker.NoOp); } Assert.IsTrue(chunkRecords.All(x => x.Version == LogRecordVersion.LogRecordV0)); @@ -247,7 +247,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void 
read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_p2.EventId, records[0].Event.EventId); Assert.AreEqual(_p4.EventId, records[1].Event.EventId); @@ -260,7 +260,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(6, records.Count); Assert.AreEqual(_random1.EventId, records[0].Event.EventId); @@ -274,7 +274,7 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_no_transaction_records_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(1, records.Count); Assert.AreEqual(_random1.EventId, records[0].Event.EventId); } @@ -282,14 +282,14 @@ public void [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), - 10); // end of first commit + 10, ITransactionFileTracker.NoOp); // end of first commit Assert.AreEqual(5, res1.Records.Count); Assert.AreEqual(_p4.EventId, res1.Records[0].Event.EventId); Assert.AreEqual(_p1.EventId, res1.Records[1].Event.EventId); @@ -297,7 +297,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p5.EventId, res1.Records[3].Event.EventId); Assert.AreEqual(_random1.EventId, res1.Records[4].Event.EventId); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2.EventId, res2.Records[0].Event.EventId); } @@ -305,7 +305,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_postCommitPos, _p4.LogPosition); // p3 post position - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3.EventId, res1.Records[0].Event.EventId); @@ -313,7 +313,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4.EventId, res1.Records[2].Event.EventId); Assert.AreEqual(_p2.EventId, res1.Records[3].Event.EventId); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 
10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(2, res2.Records.Count); Assert.AreEqual(_p5.EventId, res2.Records[0].Event.EventId); } @@ -325,7 +325,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -342,7 +342,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); pos = result.NextPos; @@ -359,14 +359,14 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; @@ -387,14 +387,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count].EventId, result.Records[0].Event.EventId); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount].EventId, localResult.Records[0].Event.EventId); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs index 2e0fa356520..7172ef47909 100644 --- 
a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_and_all_events_and_metaevents_are_in_one_chunk.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -59,18 +60,18 @@ public void the_metastream_is_absent_logically() { [Test] public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); } [Test] public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs index 4fdaa0dc6a0..906c7be68ee 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -74,20 +75,20 @@ public void the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] public void 
the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test")); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs index da046b7aca6..c3733fbadbc 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_events_are_in_multiple_chunks_2.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -78,20 +79,20 @@ public void the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test")); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs index b1e34057cc2..3cf92a2a83e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_but_some_metaevents_are_in_multiple_chunks.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using 
EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -76,20 +77,20 @@ public void the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "$$test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test")); } } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs index 8f73ac2bf3b..768f7afd711 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_and_temp_with_log_version_0_but_some_events_are_in_multiple_chunks.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -73,20 +74,20 @@ public void the_metastream_is_present_logically() { public void the_stream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "test")); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records.Count(x => x.Event.EventStreamId == "test")); + ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "test")); } [Test] public void the_metastream_is_present_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); Assert.AreEqual(1, - ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, 
ITransactionFileTracker.NoOp).Records .Count(x => x.Event.EventStreamId == "$$test"), "Read $$test stream forward"); Assert.AreEqual(1, - ReadIndex.ReadAllEventsBackward(headOfTf, 10).Records.Count(x => x.Event.EventStreamId == "$$test"), + ReadIndex.ReadAllEventsBackward(headOfTf, 10, ITransactionFileTracker.NoOp).Records.Count(x => x.Event.EventStreamId == "$$test"), "Read $$test stream backward"); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs index 7739f681281..ea78d3b6c24 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_log_record_version_0.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -53,18 +54,18 @@ public void the_metastream_is_absent_logically() { [Test] public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "test")); } [Test] public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == "$$test")); } } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs index 96d84c9eaa8..68a36e254c0 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_stream_is_softdeleted_with_mixed_log_record_version_0_and_version_1.cs @@ -1,6 +1,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Tests.TransactionLog.Scavenging.Helpers; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; using NUnit.Framework; @@ -76,18 +77,18 @@ public void the_metastream_is_absent_logically() { [Test] public void the_stream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - 
Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedStream)); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedStream)); } [Test] public void the_metastream_is_absent_physically() { var headOfTf = new TFPos(Db.Config.WriterCheckpoint.Read(), Db.Config.WriterCheckpoint.Read()); - Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedMetaStream)); - Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000).Records + Assert.IsEmpty(ReadIndex.ReadAllEventsBackward(headOfTf, 1000, ITransactionFileTracker.NoOp).Records .Where(x => x.Event.EventStreamId == _deletedMetaStream)); } diff --git a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs index 311b52e7d9b..c720dcee3cf 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Scavenge/when_writing_delete_prepare_without_commit_on_stream_spanning_through_2_chunks_in_db_with_2_chunks.cs @@ -2,6 +2,7 @@ using System.Linq; using EventStore.Core.Data; using EventStore.Core.Services; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -56,7 +57,7 @@ public void read_stream_events_backward_should_return_stream_deleted() { [Test] public void read_all_forward_returns_all_events() { - var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100).EventRecords() + var events = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); @@ -66,7 +67,7 @@ public void read_all_forward_returns_all_events() { [Test] public void read_all_backward_returns_all_events() { - var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).EventRecords() + var events = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).EventRecords() .Select(r => r.Event) .ToArray(); Assert.AreEqual(2, events.Length); diff --git a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs index 13ffa55e278..de08494bbed 100644 --- a/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/SimpleDbTestScenario.cs @@ -62,7 +62,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV2, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + _ 
=> new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV2, int.MaxValue, Constants.PTableMaxReaderCountDefault, @@ -91,6 +91,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: DbRes.Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(DbRes.Db.Config.ChaserCheckpoint.Read()); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs index 6242b58eda6..6d5b81b02f6 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_multievent_sequential_write_request_read_index_should.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -87,7 +88,7 @@ public void return_correct_range_on_from_end_range_query_for_stream_with_from_en [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(3, records.Count); Assert.AreEqual(_p1, records[0].Event); @@ -97,7 +98,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(3, records.Count); Assert.AreEqual(_p1, records[2].Event); diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs index 225f96da755..20a284d72fd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_in_the_end_read_index_should.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -60,7 +61,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, 
records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -72,7 +73,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -85,20 +86,20 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -106,7 +107,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -114,7 +115,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_pos6, _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -122,7 +123,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -134,7 +135,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], 
result.Records[0].Event); pos = result.NextPos; @@ -151,7 +152,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -168,14 +169,14 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -196,14 +197,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -220,14 +221,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr [Test] public void reading_all_forward_at_position_with_no_commits_after_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -239,14 +240,14 @@ public void [Test] public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new 
TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -259,14 +260,14 @@ public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_ [Test] public void reading_all_backward_at_position_with_no_commits_before_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -278,14 +279,14 @@ public void [Test] public void reading_all_backward_at_the_very_beginning_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs index d2bd47b826c..3b9ab810377 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_and_some_uncommited_prepares_spanning_few_chunks_read_index_should.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -61,7 +62,7 @@ protected override void WriteTestScenario() { [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, 
ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -74,7 +75,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -87,20 +88,20 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -108,7 +109,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -116,7 +117,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(_pos6, _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -124,7 +125,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -136,7 +137,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, 
result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -153,7 +154,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -170,14 +171,14 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -198,14 +199,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -222,14 +223,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr [Test] public void reading_all_forward_at_position_with_no_commits_after_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_pos6, 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -241,14 +242,14 @@ public void [Test] public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_traverse_back() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100); + 
var res1 = ReadIndex.ReadAllEventsForward(new TFPos(Db.Config.WriterCheckpoint.Read(), 0), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p5, _p3, _p1, _p4, _p2}; // in reverse committed order int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -261,14 +262,14 @@ public void reading_all_forward_at_the_very_end_returns_prev_pos_that_allows_to_ [Test] public void reading_all_backward_at_position_with_no_commits_before_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -280,14 +281,14 @@ public void [Test] public void reading_all_backward_at_the_very_beginning_returns_prev_pos_that_allows_to_traverse_forward() { - var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100); + var res1 = ReadIndex.ReadAllEventsBackward(new TFPos(0, int.MaxValue), 100, ITransactionFileTracker.NoOp); Assert.AreEqual(0, res1.Records.Count); var recs = new[] {_p2, _p4, _p1, _p3, _p5}; int count = 0; IndexReadAllResult result; TFPos pos = res1.PrevPos; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs index ebe1b5a0ebe..0b6d62c2ddd 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_read_index_should.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -160,7 +161,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, 
records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -173,7 +174,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -186,20 +187,20 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -207,7 +208,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -215,7 +216,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(Db.Config.WriterCheckpoint.Read(), _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -223,7 +224,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -235,7 +236,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); 
Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -252,7 +253,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -269,14 +270,14 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -297,14 +298,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs index 97ca1841cc8..c4d326fc93e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_having_two_intermingled_transactions_spanning_few_chunks_read_index_should.cs @@ -1,5 +1,6 @@ using EventStore.Core.Data; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; using ReadStreamResult = EventStore.Core.Services.Storage.ReaderIndex.ReadStreamResult; @@ -162,7 +163,7 @@ public void return_correct_range_on_from_end_range_query_for_smaller_stream_with [Test] public void read_all_events_forward_returns_all_events_in_correct_order() { - var records = ReadIndex.ReadAllEventsForward(new 
TFPos(0, 0), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p2, records[0].Event); @@ -175,7 +176,7 @@ public void read_all_events_forward_returns_all_events_in_correct_order() { [Test] public void read_all_events_backward_returns_all_events_in_correct_order() { var pos = GetBackwardReadPos(); - var records = ReadIndex.ReadAllEventsBackward(pos, 10).Records; + var records = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(5, records.Count); Assert.AreEqual(_p5, records[0].Event); @@ -188,20 +189,20 @@ public void read_all_events_backward_returns_all_events_in_correct_order() { [Test] public void read_all_events_forward_returns_nothing_when_prepare_position_is_greater_than_last_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10).Records; + var records = ReadIndex.ReadAllEventsForward(new TFPos(_t1CommitPos, _t1CommitPos), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_backwards_returns_nothing_when_prepare_position_is_smaller_than_first_prepare_in_commit() { - var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10).Records; + var records = ReadIndex.ReadAllEventsBackward(new TFPos(_t2CommitPos, 0), 10, ITransactionFileTracker.NoOp).Records; Assert.AreEqual(0, records.Count); } [Test] public void read_all_events_forward_returns_correct_events_starting_in_the_middle_of_tf() { - var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10); + var res1 = ReadIndex.ReadAllEventsForward(new TFPos(_t2CommitPos, _p4.LogPosition), 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p4, res1.Records[0].Event); @@ -209,7 +210,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl Assert.AreEqual(_p3, res1.Records[2].Event); Assert.AreEqual(_p5, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsBackward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p2, res2.Records[0].Event); } @@ -217,7 +218,7 @@ public void read_all_events_forward_returns_correct_events_starting_in_the_middl [Test] public void read_all_events_backward_returns_correct_events_starting_in_the_middle_of_tf() { var pos = new TFPos(Db.Config.WriterCheckpoint.Read(), _p4.LogPosition); // p3 post-pos - var res1 = ReadIndex.ReadAllEventsBackward(pos, 10); + var res1 = ReadIndex.ReadAllEventsBackward(pos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(4, res1.Records.Count); Assert.AreEqual(_p3, res1.Records[0].Event); @@ -225,7 +226,7 @@ public void read_all_events_backward_returns_correct_events_starting_in_the_midd Assert.AreEqual(_p4, res1.Records[2].Event); Assert.AreEqual(_p2, res1.Records[3].Event); - var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10); + var res2 = ReadIndex.ReadAllEventsForward(res1.PrevPos, 10, ITransactionFileTracker.NoOp); Assert.AreEqual(1, res2.Records.Count); Assert.AreEqual(_p5, res2.Records[0].Event); } @@ -237,7 +238,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_forward_pass() int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while 
((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -254,7 +255,7 @@ public void all_records_can_be_read_sequentially_page_by_page_in_backward_pass() int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); pos = result.NextPos; @@ -271,14 +272,14 @@ public void position_returned_for_prev_page_when_traversing_forward_allow_to_tra int count = 0; var pos = new TFPos(0, 0); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsForward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsForward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsBackward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; @@ -299,14 +300,14 @@ public void position_returned_for_prev_page_when_traversing_backward_allow_to_tr int count = 0; var pos = GetBackwardReadPos(); IndexReadAllResult result; - while ((result = ReadIndex.ReadAllEventsBackward(pos, 1)).Records.Count != 0) { + while ((result = ReadIndex.ReadAllEventsBackward(pos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, result.Records.Count); Assert.AreEqual(recs[count], result.Records[0].Event); var localPos = result.PrevPos; int localCount = 0; IndexReadAllResult localResult; - while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1)).Records.Count != 0) { + while ((localResult = ReadIndex.ReadAllEventsForward(localPos, 1, ITransactionFileTracker.NoOp)).Records.Count != 0) { Assert.AreEqual(1, localResult.Records.Count); Assert.AreEqual(recs[count - 1 - localCount], localResult.Records[0].Event); localPos = localResult.NextPos; diff --git a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs index 45f9f316bfa..511c830f73e 100644 --- a/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs +++ b/src/EventStore.Core.Tests/Services/Storage/Transactions/when_rebuilding_index_for_partially_persisted_transaction.cs @@ -36,7 +36,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(GetFilePathFor("index"), lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV2, maxSize: MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV2, 5, Constants.PTableMaxReaderCountDefault, 
MaxEntriesInMemTable); @@ -62,6 +62,7 @@ public override async Task TestFixtureSetUp() { indexCheckpoint: Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(ChaserCheckpoint.Read()); ReadIndex = readIndex; diff --git a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs index 749588597a7..977888cde96 100644 --- a/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs +++ b/src/EventStore.Core.Tests/Services/Storage/WriteEventsToIndexScenario.cs @@ -148,10 +148,11 @@ public override async Task TestFixtureSetUp() { _streamNames = _logFormat.StreamNames; _systemStreams = _logFormat.SystemStreams; _indexWriter = new IndexWriter(_indexBackend, _indexReader, _streamIds, _streamNames, - _systemStreams, emptyStreamId, _sizer); + _systemStreams, emptyStreamId, ITransactionFileTracker.NoOp, _sizer); _indexCommitter = new IndexCommitter(_publisher, _indexBackend, _indexReader, _tableIndex, _logFormat.StreamNameIndexConfirmer, _streamNames, _logFormat.EventTypeIndexConfirmer, _logFormat.EventTypes, - _systemStreams, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterInitializer, new InMemoryCheckpoint(-1), new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), false); + _systemStreams, _logFormat.StreamExistenceFilter, _logFormat.StreamExistenceFilterInitializer, new InMemoryCheckpoint(-1), new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), + ITransactionFileTracker.NoOp, false); WriteEvents(); } diff --git a/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs b/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs index 75b61c7438b..e81d39b0d77 100644 --- a/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs +++ b/src/EventStore.Core.Tests/Services/Transport/Grpc/EnumeratorsTests.cs @@ -9,6 +9,7 @@ using EventStore.Core.Services.UserManagement; using EventStore.Core.Tests.Helpers; using EventStore.Core.Tests.TransactionLog; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.Services.Transport.Grpc; @@ -159,6 +160,7 @@ public static SubscriptionWrapper CreateAllSubscription( requiresLeader: false, readIndex: new FakeReadIndex(_ => false, null), uuidOption: new ReadReq.Types.Options.Types.UUIDOption(), + tracker: ITransactionFileTracker.NoOp, cancellationToken: CancellationToken.None)); } @@ -179,6 +181,7 @@ public static SubscriptionWrapper CreateAllSubscriptionFiltered getStreamId, long fromEventNumber, - int maxCount, long beforePosition) { + int maxCount, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount) { + public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount) { + public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } public IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker 
tracker) { throw new NotImplementedException(); } public IndexReadAllResult ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public bool IsStreamDeleted(TStreamId streamId) { + public bool IsStreamDeleted(TStreamId streamId, ITransactionFileTracker tracker) { return _isStreamDeleted(streamId); } - public long GetStreamLastEventNumber(TStreamId streamId) { + public long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { if (_metastreams.IsMetaStream(streamId)) - return GetStreamLastEventNumber(_metastreams.OriginalStreamOf(streamId)); + return GetStreamLastEventNumber(_metastreams.OriginalStreamOf(streamId), ITransactionFileTracker.NoOp); return _isStreamDeleted(streamId) ? EventNumber.DeletedStream : 1000000; } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } @@ -130,7 +135,7 @@ public StreamAccess CheckStreamAccess(TStreamId streamId, StreamAccessType strea throw new NotImplementedException(); } - public StreamMetadata GetStreamMetadata(TStreamId streamId) { + public StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs index a341e145ad3..0b878cac3fb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Optimization/tfchunkreader_existsat_optimizer_should.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -66,14 +67,14 @@ public void posmap_items_should_exist_in_chunk() { //before optimization Assert.AreEqual(false, _existsAtOptimizer.IsOptimized(chunk)); foreach (var p in posmap) { - Assert.AreEqual(true, chunk.ExistsAt(p.LogPos)); + Assert.AreEqual(true, chunk.ExistsAt(p.LogPos, ITransactionFileTracker.NoOp)); } //after optimization _existsAtOptimizer.Optimize(chunk); Assert.AreEqual(true, _existsAtOptimizer.IsOptimized(chunk)); foreach (var p in posmap) { - Assert.AreEqual(true, chunk.ExistsAt(p.LogPos)); + Assert.AreEqual(true, chunk.ExistsAt(p.LogPos, ITransactionFileTracker.NoOp)); } 
chunk.MarkForDeletion(); @@ -92,7 +93,7 @@ private TFChunk CreateChunk(int chunkNumber, bool scavenged, out List po Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, false, - new TFChunkTracker.NoOp()); + ITransactionFileTracker.NoOp); long offset = chunkNumber * 1024 * 1024; long logPos = 0 + offset; for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) { diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs index f9a1bfcd1a3..346dbc29714 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeLifeCycleScenario.cs @@ -2,6 +2,7 @@ using System.Threading.Tasks; using EventStore.Core.LogAbstraction; using EventStore.Core.Tests.Services.Storage; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using NUnit.Framework; @@ -42,7 +43,7 @@ public override async Task TestFixtureSetUp() { Log = new FakeTFScavengerLog(); FakeTableIndex = new FakeTableIndex(); TfChunkScavenger = new TFChunkScavenger(Serilog.Log.Logger, _dbResult.Db, Log, FakeTableIndex, new FakeReadIndex(_ => false, _logFormat.Metastreams), - _logFormat.Metastreams); + _logFormat.Metastreams, ITransactionFileTracker.NoOp); try { await When().WithTimeout(TimeSpan.FromMinutes(1)); diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs index aff0f7aadbb..f9b5d524c6e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/Helpers/ScavengeTestScenario.cs @@ -65,7 +65,7 @@ public override async Task TestFixtureSetUp() { var emptyStreamId = _logFormat.EmptyStreamId; var tableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, maxSize: 200), - () => new TFReaderLease(readerPool), + _ => new TFReaderLease(readerPool, ITransactionFileTracker.NoOp), PTableVersions.IndexV3, 5, Constants.PTableMaxReaderCountDefault, maxSizeForMemory: 100, @@ -88,12 +88,14 @@ public override async Task TestFixtureSetUp() { _dbResult.Db.Config.ReplicationCheckpoint,_dbResult.Db.Config.IndexCheckpoint, new IndexStatusTracker.NoOp(), new IndexTracker.NoOp(), + ITransactionFileTrackerFactory.NoOp, new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(_dbResult.Db.Config.WriterCheckpoint.Read()); ReadIndex = readIndex; var scavenger = new TFChunkScavenger(Serilog.Log.Logger, _dbResult.Db, new FakeTFScavengerLog(), tableIndex, ReadIndex, _logFormat.Metastreams, + ITransactionFileTracker.NoOp, unsafeIgnoreHardDeletes: UnsafeIgnoreHardDelete()); await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); } @@ -121,10 +123,10 @@ protected void CheckRecords() { var chunk = _dbResult.Db.Manager.GetChunk(i); var chunkRecords = new List(); - RecordReadResult result = chunk.TryReadFirst(); + RecordReadResult result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = chunk.TryReadClosestForward((int)result.NextPosition); + result = chunk.TryReadClosestForward((int)result.NextPosition, ITransactionFileTracker.NoOp); } 
Assert.AreEqual(_keptRecords[i].Length, chunkRecords.Count, "Wrong number of records in chunk #{0}", i); diff --git a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs index d08eb03519b..bd5bde82810 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Scavenging/scavenged_chunk.cs @@ -1,5 +1,6 @@ using System; using System.Collections.Generic; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -15,7 +16,7 @@ public void is_fully_resident_in_memory_when_cached() { Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, false, - new TFChunkTracker.NoOp()); + ITransactionFileTracker.NoOp); long logPos = 0; for (int i = 0, n = ChunkFooter.Size / PosMap.FullSize + 1; i < n; ++i) { map.Add(new PosMap(logPos, (int)logPos)); @@ -26,11 +27,11 @@ public void is_fully_resident_in_memory_when_cached() { chunk.CompleteScavenge(map); - chunk.CacheInMemory(); + chunk.CacheInMemory(ITransactionFileTracker.NoOp); Assert.IsTrue(chunk.IsCached); - var last = chunk.TryReadLast(); + var last = chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(last.Success); Assert.AreEqual(map[map.Count - 1].ActualPos, last.LogRecord.LogPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs b/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs index a283a8e2dca..2763431f8e3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs +++ b/src/EventStore.Core.Tests/TransactionLog/TFChunkHelper.cs @@ -1,4 +1,5 @@ using EventStore.Core.Settings; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; @@ -72,7 +73,7 @@ public static TFChunkDbConfig CreateDbConfig( public static TFChunk CreateNewChunk(string fileName, int chunkSize = 4096, bool isScavenged = false) { return TFChunk.CreateNew(fileName, chunkSize, 0, 0, isScavenged: isScavenged, inMem: false, unbuffered: false, - writethrough: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); + writethrough: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, ITransactionFileTracker.NoOp); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs index 79c761851cc..7827341cc87 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/TruncateAndReOpenDbScenario.cs @@ -43,7 +43,7 @@ private void ReOpenDb() { var emptyStreamId = _logFormat.EmptyStreamId; TableIndex = new TableIndex(indexDirectory, lowHasher, highHasher, emptyStreamId, () => new HashListMemTable(PTableVersions.IndexV3, MaxEntriesInMemTable * 2), - () => new TFReaderLease(readers), + _ => new TFReaderLease(readers, ITransactionFileTracker.NoOp), PTableVersions.IndexV3, int.MaxValue, Constants.PTableMaxReaderCountDefault, @@ -71,6 +71,7 
@@ private void ReOpenDb() { indexCheckpoint: Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(ChaserCheckpoint.Read()); ReadIndex = readIndex; diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs index 16bb0978803..a551fc0c8d6 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_few_chunks_with_index_on_disk_and_then_reopening_db.cs @@ -1,6 +1,7 @@ using System.IO; using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.TransactionLog.Truncation { @@ -121,7 +122,7 @@ public void read_stream_backward_doesnt_return_truncated_records() { [Test] public void read_all_returns_only_survived_events() { - var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100); + var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); @@ -134,7 +135,7 @@ public void read_all_returns_only_survived_events() { [Test] public void read_all_backward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100); + var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); @@ -147,7 +148,7 @@ public void read_all_backward_doesnt_return_truncated_records() { [Test] public void read_all_backward_from_last_truncated_record_returns_no_records() { var pos = new TFPos(_event7.LogPosition, _event3.LogPosition); - var res = ReadIndex.ReadAllEventsForward(pos, 100); + var res = ReadIndex.ReadAllEventsForward(pos, 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs index 6debdfd2544..e49f0b33264 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_into_the_middle_of_scavenged_chunk_with_index_in_memory_and_then_reopening_db.cs @@ -1,6 +1,7 @@ using System.IO; using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.TransactionLog.Truncation { @@ -126,7 +127,7 @@ public void read_stream_backward_doesnt_return_truncated_records() { [Test] public void read_all_forward_returns_only_survived_events() { - var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100); + var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) 
.ToArray(); @@ -137,7 +138,7 @@ public void read_all_forward_returns_only_survived_events() { [Test] public void read_all_backward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100); + var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs index 8993c0615f6..5a813096add 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Truncation/when_truncating_single_uncompleted_chunk_with_index_in_memory_and_then_reopening_db.cs @@ -1,5 +1,6 @@ using System.Linq; using EventStore.Core.Data; +using EventStore.Core.TransactionLog; using NUnit.Framework; namespace EventStore.Core.Tests.TransactionLog.Truncation { @@ -67,7 +68,7 @@ public void read_stream_backward_doesnt_return_truncated_records() { [Test] public void read_all_forward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100); + var res = ReadIndex.ReadAllEventsForward(new TFPos(0, 0), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); @@ -77,7 +78,7 @@ public void read_all_forward_doesnt_return_truncated_records() { [Test] public void read_all_backward_doesnt_return_truncated_records() { - var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100); + var res = ReadIndex.ReadAllEventsBackward(GetBackwardReadPos(), 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); @@ -88,7 +89,7 @@ public void read_all_backward_doesnt_return_truncated_records() { [Test] public void read_all_backward_from_last_truncated_record_returns_no_records() { var pos = new TFPos(_event3.LogPosition, _event3.LogPosition); - var res = ReadIndex.ReadAllEventsForward(pos, 100); + var res = ReadIndex.ReadAllEventsForward(pos, 100, ITransactionFileTracker.NoOp); var records = res.EventRecords() .Select(r => r.Event) .ToArray(); diff --git a/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs b/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs index 7d953814feb..0c9550d45ac 100644 --- a/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs +++ b/src/EventStore.Core.Tests/TransactionLog/Validation/when_validating_tfchunk_db.cs @@ -5,6 +5,7 @@ using System.Threading; using System.Threading.Tasks; using EventStore.Core.Exceptions; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.FileNamingStrategy; @@ -446,7 +447,7 @@ public void when_prelast_chunk_corrupted_throw_hash_validation_exception() { .WriteTo.Sink(sink) .MinimumLevel.Verbose() .CreateLogger()) - using (var db = new TFChunkDb(config, new TFChunkTracker.NoOp(), log)) { + using (var db = new TFChunkDb(config, ITransactionFileTracker.NoOp, log)) { byte[] contents = new byte[config.ChunkSize]; for (var i = 0; i < config.ChunkSize; i++) { contents[i] = 0; diff --git 
a/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs b/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs index 804532598c7..1c386d91075 100644 --- a/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs +++ b/src/EventStore.Core.Tests/TransactionLog/tfchunk_get_actual_raw_position_should.cs @@ -2,6 +2,7 @@ using System.Collections.Generic; using System.IO; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -69,7 +70,7 @@ public void return_correct_positions_for_an_incomplete_unscavenged_chunk() { Assert.AreEqual(numEvents, logPositions.Count); foreach(var logPos in logPositions) - Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos)); + Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos, ITransactionFileTracker.NoOp)); Assert.IsNull(posMap); } @@ -87,7 +88,7 @@ public void return_correct_positions_for_a_complete_unscavenged_chunk() { Assert.AreEqual(numEvents, logPositions.Count); foreach(var logPos in logPositions) - Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos)); + Assert.AreEqual(ChunkHeader.Size + logPos, chunk.GetActualRawPosition(logPos, ITransactionFileTracker.NoOp)); Assert.IsNull(posMap); } @@ -107,7 +108,7 @@ public void return_correct_positions_for_a_scavenged_chunk() { Assert.AreEqual(numEvents, posMap.Count); for (int i = 0; i < numEvents; i++) { Assert.AreEqual(posMap[i].LogPos, logPositions[i]); - Assert.AreEqual(ChunkHeader.Size + posMap[i].ActualPos, chunk.GetActualRawPosition(logPositions[i])); + Assert.AreEqual(ChunkHeader.Size + posMap[i].ActualPos, chunk.GetActualRawPosition(logPositions[i], ITransactionFileTracker.NoOp)); } } @@ -125,9 +126,9 @@ public void return_minus_one_for_positions_that_are_outside_the_range_of_an_unsc Assert.IsNull(posMap); Assert.AreEqual(chunk.LogicalDataSize, chunk.PhysicalDataSize); - Assert.AreEqual(ChunkHeader.Size + chunk.LogicalDataSize - 1, chunk.GetActualRawPosition(chunk.LogicalDataSize - 1)); - Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize)); - Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize + 1)); + Assert.AreEqual(ChunkHeader.Size + chunk.LogicalDataSize - 1, chunk.GetActualRawPosition(chunk.LogicalDataSize - 1, ITransactionFileTracker.NoOp)); + Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize, ITransactionFileTracker.NoOp)); + Assert.AreEqual(-1, chunk.GetActualRawPosition(chunk.LogicalDataSize + 1, ITransactionFileTracker.NoOp)); } [Test] @@ -143,8 +144,8 @@ public void return_minus_one_for_positions_that_do_not_exist_in_a_scavenged_chun Assert.AreEqual(1, logPositions.Count); Assert.AreEqual(1, posMap.Count); - Assert.AreEqual(ChunkHeader.Size + posMap[0].ActualPos, chunk.GetActualRawPosition(logPositions[0])); - Assert.AreEqual(-1, chunk.GetActualRawPosition(logPositions[0] + 1)); + Assert.AreEqual(ChunkHeader.Size + posMap[0].ActualPos, chunk.GetActualRawPosition(logPositions[0], ITransactionFileTracker.NoOp)); + Assert.AreEqual(-1, chunk.GetActualRawPosition(logPositions[0] + 1, ITransactionFileTracker.NoOp)); } [Test] @@ -157,7 +158,7 @@ public void throw_argument_out_of_range_exception_for_negative_positions() { out _, out _); - Assert.Throws<ArgumentOutOfRangeException>(() => chunk.GetActualRawPosition(-1)); + Assert.Throws<ArgumentOutOfRangeException>(() =>
chunk.GetActualRawPosition(-1, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs index 988bd7840f0..e638b6abebb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_appending_to_a_tfchunk_and_flushing.cs @@ -60,7 +60,7 @@ public void the_updated_position_is_returned() { [Test] public void the_record_can_be_read_at_exact_position() { - var res = _chunk.TryReadAt(0, couldBeScavenged: false); + var res = _chunk.TryReadAt(0, couldBeScavenged: false, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_result.OldPosition, res.LogRecord.LogPosition); @@ -68,7 +68,7 @@ public void the_record_can_be_read_at_exact_position() { [Test] public void the_record_can_be_read_as_first_one() { - var res = _chunk.TryReadFirst(); + var res = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); @@ -76,7 +76,7 @@ public void the_record_can_be_read_as_first_one() { [Test] public void the_record_can_be_read_as_closest_forward_to_pos_zero() { - var res = _chunk.TryReadClosestForward(0); + var res = _chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); @@ -84,7 +84,7 @@ public void the_record_can_be_read_as_closest_forward_to_pos_zero() { [Test] public void the_record_can_be_read_as_closest_backward_from_end() { - var res = _chunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix()); + var res = _chunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(0, res.NextPosition); @@ -92,7 +92,7 @@ public void the_record_can_be_read_as_closest_backward_from_end() { [Test] public void the_record_can_be_read_as_last_one() { - var res = _chunk.TryReadLast(); + var res = _chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(0, res.NextPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs b/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs index d1534cbb998..2e12a4ace84 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_chasing_a_chunked_transaction_log.cs @@ -1,6 +1,7 @@ using System; using System.IO; using EventStore.Core.LogAbstraction; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; @@ -34,7 +35,7 @@ public void try_read_returns_false_when_writer_checkpoint_is_zero() { var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk)); db.Open(); - var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint(), false); + var chaser = new TFChunkChaser(db, writerchk, new InMemoryCheckpoint(), false, 
ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; @@ -55,7 +56,7 @@ public void try_read_returns_false_when_writer_checksum_is_equal_to_reader_check chaserchk.Write(12); chaserchk.Flush(); - var chaser = new TFChunkChaser(db, writerchk, chaserchk, false); + var chaser = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; @@ -102,7 +103,7 @@ public void try_read_returns_record_when_writerchecksum_ahead() { var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, writerchk, chaserchk)); db.Open(); - var chaser = new TFChunkChaser(db, writerchk, chaserchk, false); + var chaser = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; @@ -151,7 +152,7 @@ public void try_read_returns_record_when_record_bigger_than_internal_buffer() { writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix()); - var reader = new TFChunkChaser(db, writerchk, chaserchk, false); + var reader = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); reader.Open(); ILogRecord record; @@ -198,7 +199,7 @@ public void try_read_returns_record_when_writerchecksum_equal() { writerchk.Write(recordToWrite.GetSizeWithLengthPrefixAndSuffix()); - var chaser = new TFChunkChaser(db, writerchk, chaserchk, false); + var chaser = new TFChunkChaser(db, writerchk, chaserchk, false, ITransactionFileTracker.NoOp); chaser.Open(); ILogRecord record; diff --git a/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs b/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs index 500ee6153ae..89a33e1393e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_closing_the_database.cs @@ -1,6 +1,7 @@ using System; using System.IO; using System.Threading.Tasks; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; @@ -67,7 +68,7 @@ public override Task SetUp() { public void checkpoints_should_be_flushed_only_when_chunks_are_properly_closed(bool chunksClosed) { if (!chunksClosed) { // acquire a reader to prevent the chunk from being properly closed - _db.Manager.GetChunk(0).AcquireReader(); + _db.Manager.GetChunk(0).AcquireReader(ITransactionFileTracker.NoOp); } var writer = new TFChunkWriter(_db); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs b/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs index 093f5176c81..224d96af9cb 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_creating_chunked_transaction_chaser.cs @@ -12,19 +12,19 @@ public class when_creating_chunked_transaction_chaser : SpecificationWithDirecto [Test] public void a_null_file_config_throws_argument_null_exception() { Assert.Throws<ArgumentNullException>( - () => new TFChunkChaser(null, new InMemoryCheckpoint(0), new InMemoryCheckpoint(0), false)); + () => new TFChunkChaser(null, new InMemoryCheckpoint(0), new InMemoryCheckpoint(0), false, ITransactionFileTracker.NoOp)); } [Test] public void a_null_writer_checksum_throws_argument_null_exception() { using var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0)); - Assert.Throws<ArgumentNullException>(() => new TFChunkChaser(db, null, new InMemoryCheckpoint(), false)); +
Assert.Throws<ArgumentNullException>(() => new TFChunkChaser(db, null, new InMemoryCheckpoint(), false, ITransactionFileTracker.NoOp)); } [Test] public void a_null_chaser_checksum_throws_argument_null_exception() { using var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, 0)); - Assert.Throws<ArgumentNullException>(() => new TFChunkChaser(db, new InMemoryCheckpoint(), null, false)); + Assert.Throws<ArgumentNullException>(() => new TFChunkChaser(db, new InMemoryCheckpoint(), null, false, ITransactionFileTracker.NoOp)); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs index 12252ee5739..332de588b54 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_creating_tfchunk_from_empty_file.cs @@ -1,5 +1,6 @@ using System; using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -43,31 +44,31 @@ public void append_does_not_throw_exception() { [Test] public void there_is_no_record_at_pos_zero() { - var res = _chunk.TryReadAt(0, couldBeScavenged: true); + var res = _chunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void there_is_no_first_record() { - var res = _chunk.TryReadFirst(); + var res = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void there_is_no_closest_forward_record_to_pos_zero() { - var res = _chunk.TryReadClosestForward(0); + var res = _chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void there_is_no_closest_backward_record_from_end() { - var res = _chunk.TryReadClosestForward(0); + var res = _chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void there_is_no_last_record() { - var res = _chunk.TryReadLast(); + var res = _chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs b/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs index 33a9d21bf32..986b461a9c9 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_destroying_a_tfchunk_that_is_locked.cs @@ -1,4 +1,5 @@ using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -15,7 +16,7 @@ public override void SetUp() { _chunk = TFChunkHelper.CreateNewChunk(Filename, 1000); _chunk.Complete(); _chunk.UnCacheFromMemory(); - _reader = _chunk.AcquireReader(); + _reader = _chunk.AcquireReader(ITransactionFileTracker.NoOp); _chunk.MarkForDeletion(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs index 647a766cccb..90e3340d63f 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_having_scavenged_tfchunk_with_all_records_removed.cs @@ -83,7 +83,7 @@ public override async
Task TestFixtureSetUp() { var scavenger = new TFChunkScavenger(Serilog.Log.Logger, _db, new FakeTFScavengerLog(), new FakeTableIndex(), new FakeReadIndex(x => EqualityComparer.Default.Equals(x, streamId), _logFormat.Metastreams), - _logFormat.Metastreams); + _logFormat.Metastreams, ITransactionFileTracker.NoOp); await scavenger.Scavenge(alwaysKeepScavenged: true, mergeChunks: false); _scavengedChunk = _db.Manager.GetChunk(0); @@ -116,47 +116,47 @@ public void third_record_was_written() { [Test] public void prepare1_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_p1.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_p1.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void commit1_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_c1.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_c1.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void prepare2_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_p2.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_p2.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void commit2_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_c2.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_c2.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void prepare3_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_p3.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_p3.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void commit3_cant_be_read_at_position() { - var res = _scavengedChunk.TryReadAt((int)_c3.LogPosition, couldBeScavenged: true); + var res = _scavengedChunk.TryReadAt((int)_c3.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void sequencial_read_returns_no_records() { var records = new List(); - RecordReadResult res = _scavengedChunk.TryReadFirst(); + RecordReadResult res = _scavengedChunk.TryReadFirst(ITransactionFileTracker.NoOp); while (res.Success) { records.Add(res.LogRecord); - res = _scavengedChunk.TryReadClosestForward((int)res.NextPosition); + res = _scavengedChunk.TryReadClosestForward((int)res.NextPosition, ITransactionFileTracker.NoOp); } if (LogFormatHelper.IsV2) { diff --git a/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs b/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs index e21ef99f282..cfc5ef4cec3 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlocked.cs @@ -1,4 +1,5 @@ using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -11,7 +12,7 @@ public class when_marking_for_deletion_a_tfchunk_that_has_been_locked_and_unlock public override void SetUp() { 
base.SetUp(); _chunk = TFChunkHelper.CreateNewChunk(Filename, 1000); - var reader = _chunk.AcquireReader(); + var reader = _chunk.AcquireReader(ITransactionFileTracker.NoOp); _chunk.MarkForDeletion(); reader.Release(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs index feaa6c0c0f3..44c0c0c68d9 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_opening_existing_tfchunk.cs @@ -1,4 +1,5 @@ using System; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -18,7 +19,8 @@ public override void TestFixtureSetUp() { _testChunk = TFChunk.FromCompletedFile(Filename, true, false, Constants.TFChunkInitialReaderCountDefault, Constants.TFChunkMaxReaderCountDefault, - reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); + reduceFileCachePressure: false, + tracker: ITransactionFileTracker.NoOp); } [TearDown] diff --git a/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs index 4604e10e2bb..50cdb9882fd 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_opening_tfchunk_from_non_existing_file.cs @@ -1,4 +1,5 @@ using EventStore.Core.Exceptions; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -8,8 +9,8 @@ namespace EventStore.Core.Tests.TransactionLog { public class when_opening_tfchunk_from_non_existing_file : SpecificationWithFile { [Test] public void it_should_throw_a_file_not_found_exception() { - Assert.Throws(() => TFChunk.FromCompletedFile(Filename, verifyHash: true, - unbufferedRead: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp())); + Assert.Throws(() => TFChunk.FromCompletedFile(Filename, verifyHash: true, tracker: ITransactionFileTracker.NoOp, + unbufferedRead: false, initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false)); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs index 2123e8681c2..1ed9c35252a 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_a_single_record.cs @@ -86,7 +86,7 @@ public void all_records_can_be_read() { RecordReadResult res; for (var i = 0; i < RecordsCount; i++) { var rec = _records[i]; - res = reader.TryReadAt(rec.LogPosition, couldBeScavenged: true); + res = reader.TryReadAt(rec.LogPosition, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(rec, res.LogRecord); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs index 
fafd13ba2d5..64f07208f02 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_an_empty_chunked_transaction_log.cs @@ -20,7 +20,7 @@ public void try_read_returns_false_when_writer_checksum_is_zero() { db.Open(); var reader = new TFChunkReader(db, writerchk, 0); - Assert.IsFalse(reader.TryReadNext().Success); + Assert.IsFalse(reader.TryReadNext(ITransactionFileTracker.NoOp).Success); db.Close(); } @@ -37,7 +37,7 @@ public void try_read_does_not_cache_anything_and_returns_record_once_it_is_writt var reader = new TFChunkReader(db, writerchk, 0); - Assert.IsFalse(reader.TryReadNext().Success); + Assert.IsFalse(reader.TryReadNext(ITransactionFileTracker.NoOp).Success); var recordFactory = LogFormatHelper.RecordFactory; var streamId = LogFormatHelper.StreamId; @@ -48,7 +48,7 @@ public void try_read_does_not_cache_anything_and_returns_record_once_it_is_writt writer.Flush(); writer.Close(); - var res = reader.TryReadNext(); + var res = reader.TryReadNext(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(rec, res.LogRecord); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs index 086e5cdf6ae..c60878532f6 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_cached_empty_scavenged_tfchunk.cs @@ -1,3 +1,4 @@ +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -11,7 +12,7 @@ public override void TestFixtureSetUp() { base.TestFixtureSetUp(); _chunk = TFChunkHelper.CreateNewChunk(Filename, isScavenged: true); _chunk.CompleteScavenge(new PosMap[0]); - _chunk.CacheInMemory(); + _chunk.CacheInMemory(ITransactionFileTracker.NoOp); } [OneTimeTearDown] @@ -22,27 +23,27 @@ public override void TestFixtureTearDown() { [Test] public void no_record_at_exact_position_can_be_read() { - Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true).Success); + Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_first_record() { - Assert.IsFalse(_chunk.TryReadFirst().Success); + Assert.IsFalse(_chunk.TryReadFirst(ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_closest_forward_record() { - Assert.IsFalse(_chunk.TryReadClosestForward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_closest_backward_record() { - Assert.IsFalse(_chunk.TryReadClosestBackward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestBackward(0, ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_last_record() { - Assert.IsFalse(_chunk.TryReadLast().Success); + Assert.IsFalse(_chunk.TryReadLast(ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs index a7164447673..90487f978b5 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_from_a_cached_tfchunk.cs @@ -29,9 +29,9 @@ public 
override void TestFixtureSetUp() { _result = _chunk.TryAppend(_record); _chunk.Flush(); _chunk.Complete(); - _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, - initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); - _cachedChunk.CacheInMemory(); + _cachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, tracker: ITransactionFileTracker.NoOp, + initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); + _cachedChunk.CacheInMemory(ITransactionFileTracker.NoOp); } [OneTimeTearDown] @@ -55,7 +55,7 @@ public void the_chunk_is_cached() { [Test] public void the_record_can_be_read_at_exact_position() { - var res = _cachedChunk.TryReadAt(0, couldBeScavenged: true); + var res = _cachedChunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_result.OldPosition, res.LogRecord.LogPosition); @@ -63,7 +63,7 @@ public void the_record_can_be_read_at_exact_position() { [Test] public void the_record_can_be_read_as_first_record() { - var res = _cachedChunk.TryReadFirst(); + var res = _cachedChunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.AreEqual(_record, res.LogRecord); @@ -72,7 +72,7 @@ public void the_record_can_be_read_as_first_record() { [Test] public void the_record_can_be_read_as_closest_forward_to_zero_pos() { - var res = _cachedChunk.TryReadClosestForward(0); + var res = _cachedChunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.AreEqual(_record, res.LogRecord); @@ -81,7 +81,7 @@ public void the_record_can_be_read_as_closest_forward_to_zero_pos() { [Test] public void the_record_can_be_read_as_closest_backward_from_end() { - var res = _cachedChunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix()); + var res = _cachedChunk.TryReadClosestBackward(_record.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(0, res.NextPosition); Assert.AreEqual(_record, res.LogRecord); @@ -89,7 +89,7 @@ public void the_record_can_be_read_as_closest_backward_from_end() { [Test] public void the_record_can_be_read_as_last() { - var res = _cachedChunk.TryReadLast(); + var res = _cachedChunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(0, res.NextPosition); Assert.AreEqual(_record, res.LogRecord); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs index c334ea8290a..4459436e39c 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_logical_bytes_bulk_from_a_chunk.cs @@ -1,6 +1,7 @@ using System; using EventStore.Core.LogAbstraction; using EventStore.Core.LogV2; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using 
NUnit.Framework; @@ -15,7 +16,7 @@ public when_reading_logical_bytes_bulk_from_a_chunk() { [Test] public void the_file_will_not_be_deleted_until_reader_released() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { chunk.MarkForDeletion(); var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); @@ -29,7 +30,7 @@ public void the_file_will_not_be_deleted_until_reader_released() { [Test] public void a_read_on_new_file_can_be_performed_but_returns_nothing() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsFalse(result.IsEOF); @@ -44,7 +45,7 @@ public void a_read_on_new_file_can_be_performed_but_returns_nothing() { public void a_read_past_end_of_completed_chunk_does_not_include_footer() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300); chunk.Complete(); // chunk has 0 bytes of actual data - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsTrue(result.IsEOF); @@ -60,7 +61,7 @@ public void a_read_past_end_of_completed_chunk_does_not_include_footer() { public void a_read_on_scavenged_chunk_does_not_include_map() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("afile"), 200, isScavenged: true); chunk.CompleteScavenge(new[] {new PosMap(0, 0), new PosMap(1, 1)}); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsTrue(result.IsEOF); @@ -82,7 +83,7 @@ public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size() { new byte[2000], null); Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended"); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(3000, buffer); Assert.IsFalse(result.IsEOF); @@ -98,7 +99,7 @@ public void a_read_past_eof_doesnt_return_eof_if_chunk_is_not_yet_completed() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300); var rec = LogRecord.Commit(0, Guid.NewGuid(), 0, 0); Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended"); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsFalse(result.IsEOF, "EOF was returned."); @@ -119,7 +120,7 @@ public void a_read_past_eof_returns_eof_if_chunk_is_completed() { Assert.IsTrue(chunk.TryAppend(rec).Success, "Record was not appended"); chunk.Complete(); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextDataBytes(1024, buffer); Assert.IsTrue(result.IsEOF, "EOF was not returned."); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs 
b/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs index 665fd765d11..7f2cb81042a 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_physical_bytes_bulk_from_a_chunk.cs @@ -1,3 +1,4 @@ +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -8,7 +9,7 @@ public class when_reading_physical_bytes_bulk_from_a_chunk : SpecificationWithDi [Test] public void the_file_will_not_be_deleted_until_reader_released() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { chunk.MarkForDeletion(); var buffer = new byte[1024]; var result = reader.ReadNextRawBytes(1024, buffer); @@ -22,7 +23,7 @@ public void the_file_will_not_be_deleted_until_reader_released() { [Test] public void a_read_on_new_file_can_be_performed() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 2000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextRawBytes(1024, buffer); Assert.IsFalse(result.IsEOF); @@ -69,7 +70,7 @@ public void a_read_past_end_of_completed_chunk_does_include_header_or_footer() [Test] public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 3000); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[1024]; var result = reader.ReadNextRawBytes(3000, buffer); Assert.IsFalse(result.IsEOF); @@ -83,7 +84,7 @@ public void if_asked_for_more_than_buffer_size_will_only_read_buffer_size() { [Test] public void a_read_past_eof_returns_eof_and_no_footer() { var chunk = TFChunkHelper.CreateNewChunk(GetFilePathFor("file1"), 300); - using (var reader = chunk.AcquireReader()) { + using (var reader = chunk.AcquireReader(ITransactionFileTracker.NoOp)) { var buffer = new byte[8092]; var result = reader.ReadNextRawBytes(8092, buffer); Assert.IsTrue(result.IsEOF); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs index 19686c9d20c..ebacd6d6092 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_reading_uncached_empty_scavenged_tfchunk.cs @@ -1,3 +1,4 @@ +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -21,27 +22,27 @@ public override void TestFixtureTearDown() { [Test] public void no_record_at_exact_position_can_be_read() { - Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true).Success); + Assert.IsFalse(_chunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_first_record() { - Assert.IsFalse(_chunk.TryReadFirst().Success); + Assert.IsFalse(_chunk.TryReadFirst(ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_closest_forward_record() { - 
Assert.IsFalse(_chunk.TryReadClosestForward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestForward(0, ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_closest_backward_record() { - Assert.IsFalse(_chunk.TryReadClosestBackward(0).Success); + Assert.IsFalse(_chunk.TryReadClosestBackward(0, ITransactionFileTracker.NoOp).Success); } [Test] public void no_record_can_be_read_as_last_record() { - Assert.IsFalse(_chunk.TryReadLast().Success); + Assert.IsFalse(_chunk.TryReadLast(ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs index 1952b77c05f..d85999d5046 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_few_chunks.cs @@ -85,7 +85,7 @@ public void all_records_could_be_read_with_forward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -103,7 +103,7 @@ public void all_records_could_be_read_with_backward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -121,7 +121,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { SeqReadResult res; int count1 = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -133,7 +133,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { Assert.AreEqual(RecordsCount, count1); int count2 = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count2 - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -152,7 +152,7 @@ public void records_can_be_read_forward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i + count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -172,7 +172,7 @@ public void records_can_be_read_backward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs index 8b632096064..ae4cbe25f57 100644 --- 
a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk.cs @@ -72,7 +72,7 @@ public void all_records_could_be_read_with_forward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -90,7 +90,7 @@ public void only_the_last_record_is_marked_eof() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { ++count; Assert.AreEqual(count == RecordsCount, res.Eof); } @@ -104,7 +104,7 @@ public void all_records_could_be_read_with_backward_pass() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -122,7 +122,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { SeqReadResult res; int count1 = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[count1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -134,7 +134,7 @@ public void all_records_could_be_read_doing_forward_backward_pass() { Assert.AreEqual(RecordsCount, count1); int count2 = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[RecordsCount - count2 - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -153,7 +153,7 @@ public void records_can_be_read_forward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i + count]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); @@ -173,7 +173,7 @@ public void records_can_be_read_backward_starting_from_any_position() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadPrev()).Success) { + while ((res = seqReader.TryReadPrev(ITransactionFileTracker.NoOp)).Success) { var rec = _records[i - count - 1]; Assert.AreEqual(rec, res.LogRecord); Assert.AreEqual(rec.LogPosition, res.RecordPrePosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs index 5f1ed1e4d69..88606866f1b 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_sequentially_reading_db_with_one_chunk_ending_with_prepare.cs @@ -83,7 +83,7 @@ public void only_the_last_record_is_marked_eof() { SeqReadResult res; int count = 0; - while ((res = seqReader.TryReadNext()).Success) { + while ((res = seqReader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { ++count; 
Assert.AreEqual(count == RecordsCount, res.Eof); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs index 24d8ed146bd..db889b6fb0d 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_uncaching_a_tfchunk.cs @@ -29,9 +29,9 @@ public override void TestFixtureSetUp() { _result = _chunk.TryAppend(_record); _chunk.Flush(); _chunk.Complete(); - _uncachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, - initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false, tracker: new TFChunkTracker.NoOp()); - _uncachedChunk.CacheInMemory(); + _uncachedChunk = TFChunk.FromCompletedFile(Filename, verifyHash: true, unbufferedRead: false, tracker: ITransactionFileTracker.NoOp, + initialReaderCount: Constants.TFChunkInitialReaderCountDefault, maxReaderCount: Constants.TFChunkMaxReaderCountDefault, reduceFileCachePressure: false); + _uncachedChunk.CacheInMemory(ITransactionFileTracker.NoOp); _uncachedChunk.UnCacheFromMemory(); } @@ -66,7 +66,7 @@ public void the_correct_position_is_returned() { [Test] public void the_record_can_be_read() { - var res = _uncachedChunk.TryReadAt(0, couldBeScavenged: true); + var res = _uncachedChunk.TryReadAt(0, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_record, res.LogRecord); Assert.AreEqual(_result.OldPosition, res.LogRecord.LogPosition); diff --git a/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs b/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs index a0eb05cb364..85790aec2f2 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_unlocking_a_tfchunk_that_has_been_marked_for_deletion.cs @@ -1,4 +1,5 @@ using System.IO; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using NUnit.Framework; @@ -11,7 +12,7 @@ public class when_unlocking_a_tfchunk_that_has_been_marked_for_deletion : Specif public override void SetUp() { base.SetUp(); _chunk = TFChunkHelper.CreateNewChunk(Filename, 1000); - var reader = _chunk.AcquireReader(); + var reader = _chunk.AcquireReader(ITransactionFileTracker.NoOp); _chunk.MarkForDeletion(); reader.Release(); } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs index 2638cbcd542..5207c3b66c7 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_commit_record_to_file.cs @@ -43,7 +43,7 @@ public void Teardown() { [Test] public void the_data_is_written() { - using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) { + using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false, ITransactionFileTracker.NoOp)) { reader.Open(); ILogRecord r; Assert.IsTrue(reader.TryReadNext(out r)); @@ -66,7 +66,7 @@ public void the_checksum_is_updated() { [Test] public void trying_to_read_past_writer_checksum_returns_false() { var reader = new 
TFChunkReader(_db, _writerCheckpoint); - Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true).Success); + Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs index ef0dbb2f01c..f607b8e1037 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_multiple_records_to_a_tfchunk.cs @@ -1,4 +1,5 @@ using System; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; using NUnit.Framework; @@ -66,7 +67,7 @@ public void the_second_record_was_written() { [Test] public void the_first_record_can_be_read_at_position() { - var res = _chunk.TryReadAt((int)_position1, couldBeScavenged: true); + var res = _chunk.TryReadAt((int)_position1, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.IsTrue(res.LogRecord is IPrepareLogRecord); Assert.AreEqual(_prepare1, res.LogRecord); @@ -74,7 +75,7 @@ public void the_first_record_can_be_read_at_position() { [Test] public void the_second_record_can_be_read_at_position() { - var res = _chunk.TryReadAt((int)_position2, couldBeScavenged: true); + var res = _chunk.TryReadAt((int)_position2, couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.IsTrue(res.LogRecord is IPrepareLogRecord); Assert.AreEqual(_prepare2, res.LogRecord); @@ -82,7 +83,7 @@ public void the_second_record_can_be_read_at_position() { [Test] public void the_first_record_can_be_read() { - var res = _chunk.TryReadFirst(); + var res = _chunk.TryReadFirst(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_prepare1.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.IsTrue(res.LogRecord is IPrepareLogRecord); @@ -91,7 +92,7 @@ public void the_first_record_can_be_read() { [Test] public void the_second_record_can_be_read_as_closest_forward_after_first() { - var res = _chunk.TryReadClosestForward(_prepare1.GetSizeWithLengthPrefixAndSuffix()); + var res = _chunk.TryReadClosestForward(_prepare1.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_prepare1.GetSizeWithLengthPrefixAndSuffix() + _prepare2.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); @@ -102,13 +103,13 @@ public void the_second_record_can_be_read_as_closest_forward_after_first() { [Test] public void cannot_read_past_second_record_with_closest_forward_method() { var res = _chunk.TryReadClosestForward(_prepare1.GetSizeWithLengthPrefixAndSuffix() - + _prepare2.GetSizeWithLengthPrefixAndSuffix()); + + _prepare2.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } [Test] public void the_seconds_record_can_be_read_as_last() { - var res = _chunk.TryReadLast(); + var res = _chunk.TryReadLast(ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(_prepare1.GetSizeWithLengthPrefixAndSuffix(), res.NextPosition); Assert.AreEqual(_prepare2, res.LogRecord); @@ -116,7 +117,7 @@ public void the_seconds_record_can_be_read_as_last() { [Test] public void the_first_record_can_be_read_as_closest_backward_after_last() { - var res 
= _chunk.TryReadClosestBackward(_prepare1.GetSizeWithLengthPrefixAndSuffix()); + var res = _chunk.TryReadClosestBackward(_prepare1.GetSizeWithLengthPrefixAndSuffix(), ITransactionFileTracker.NoOp); Assert.IsTrue(res.Success); Assert.AreEqual(0, res.NextPosition); Assert.AreEqual(_prepare1, res.LogRecord); @@ -124,7 +125,7 @@ public void the_first_record_can_be_read_as_closest_backward_after_last() { [Test] public void cannot_read_backward_from_zero_pos() { - var res = _chunk.TryReadClosestBackward(0); + var res = _chunk.TryReadClosestBackward(0, ITransactionFileTracker.NoOp); Assert.IsFalse(res.Success); } } diff --git a/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs b/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs index 1772d558e4a..90562f9834e 100644 --- a/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs +++ b/src/EventStore.Core.Tests/TransactionLog/when_writing_prepare_record_to_file.cs @@ -60,7 +60,7 @@ public void Teardown() { [Test] public void the_data_is_written() { //TODO MAKE THIS ACTUALLY ASSERT OFF THE FILE AND READER FROM KNOWN FILE - using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false)) { + using (var reader = new TFChunkChaser(_db, _writerCheckpoint, _db.Config.ChaserCheckpoint, false, ITransactionFileTracker.NoOp)) { reader.Open(); ILogRecord r; Assert.IsTrue(reader.TryReadNext(out r)); @@ -94,7 +94,7 @@ public void the_checksum_is_updated() { [Test] public void trying_to_read_past_writer_checksum_returns_false() { var reader = new TFChunkReader(_db, _writerCheckpoint); - Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true).Success); + Assert.IsFalse(reader.TryReadAt(_writerCheckpoint.Read(), couldBeScavenged: true, tracker: ITransactionFileTracker.NoOp).Success); } } } diff --git a/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs b/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs index 3d90467e87d..839d7ad9890 100644 --- a/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogAbstraction/LogFormatAbstractorV3Tests.cs @@ -317,7 +317,7 @@ class MockIndexReader : IIndexReader { public int StreamCount => _index[LogV3SystemStreams.StreamsCreatedStreamNumber].Count; public int EventTypeCount => _index[LogV3SystemStreams.EventTypesStreamNumber].Count; - public IPrepareLogRecord ReadPrepare(StreamId streamId, long eventNumber) { + public IPrepareLogRecord ReadPrepare(StreamId streamId, long eventNumber, ITransactionFileTracker tracker) { // simulates what would be in the index. 
return _index[streamId][eventNumber]; } @@ -328,56 +328,56 @@ public IPrepareLogRecord ReadPrepare(StreamId streamId, long eventNumb public long HashCollisions => throw new NotImplementedException(); - public StorageMessage.EffectiveAcl GetEffectiveAcl(StreamId streamId) => + public StorageMessage.EffectiveAcl GetEffectiveAcl(StreamId streamId, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(uint streamId, long eventNumber) { + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(uint streamId, long eventNumber, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public StreamId GetEventStreamIdByTransactionId(long transactionId) => + public StreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public long GetStreamLastEventNumber(StreamId streamId) { + public long GetStreamLastEventNumber(StreamId streamId, ITransactionFileTracker tracker) { if (streamId == LogV3SystemStreams.StreamsCreatedStreamNumber) return _index[streamId].Count - 1; throw new NotImplementedException(); } - public StreamMetadata GetStreamMetadata(StreamId streamId) => + public StreamMetadata GetStreamMetadata(StreamId streamId, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventResult ReadEvent(string streamName, StreamId streamId, long eventNumber) => + public IndexReadEventResult ReadEvent(string streamName, StreamId streamId, long eventNumber, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadStreamResult ReadStreamEventsBackward(string streamName, StreamId streamId, long fromEventNumber, int maxCount) => + public IndexReadStreamResult ReadStreamEventsBackward(string streamName, StreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadStreamResult ReadStreamEventsForward(string streamName, StreamId streamId, long fromEventNumber, int maxCount) => + public IndexReadStreamResult ReadStreamEventsForward(string streamName, StreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition) => + public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition) => + public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(uint streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition) => + public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition, 
ITransactionFileTracker tracker) => throw new NotImplementedException(); - public long GetStreamLastEventNumber_KnownCollisions(uint streamId, long beforePosition) => + public long GetStreamLastEventNumber_KnownCollisions(uint streamId, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) => + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) => throw new NotImplementedException(); } } public class MockIndexBackend : IIndexBackend { - public TFReaderLease BorrowReader() { + public TFReaderLease BorrowReader(ITransactionFileTracker tracker) { throw new NotImplementedException(); } diff --git a/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs index 13ee44540e8..3e2863b7ad3 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV2/LogV2StreamExistenceFilterInitializerTests.cs @@ -27,14 +27,14 @@ public LogV2StreamExistenceFilterInitializerTests() { version: PTableVersions.IndexV4, maxSize: 1_000_000 * 2), maxSizeForMemory: 100_000, - tfReaderFactory: () => new TFReaderLease(_log), + tfReaderFactory: tracker => new TFReaderLease(_log, ITransactionFileTracker.NoOp), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: 5); _tableIndex.Initialize(0); _sut = new LogV2StreamExistenceFilterInitializer( - tfReaderFactory: () => new TFReaderLease(_log), + tfReaderFactory: tracker => new TFReaderLease(_log, ITransactionFileTracker.NoOp), tableIndex: _tableIndex); var hasher = new CompositeHasher(new XXHashUnsafe(), new Murmur3AUnsafe()); _filter = new MockExistenceFilter(hasher); @@ -180,14 +180,14 @@ public void cannot_initialize_with_v1_indexes() { memTableFactory: () => new HashListMemTable( version: PTableVersions.IndexV1, maxSize: 1_000_000 * 2), - tfReaderFactory: () => throw new Exception("index tried to read the log"), + tfReaderFactory: _ => throw new Exception("index tried to read the log"), ptableVersion: PTableVersions.IndexV1, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: 5); tableIndex.Initialize(0); var sut = new LogV2StreamExistenceFilterInitializer( - tfReaderFactory: () => throw new Exception("initializer tried to read the log"), + tfReaderFactory: _ => throw new Exception("initializer tried to read the log"), tableIndex: tableIndex); var filter = new MockExistenceFilter(hasher: null); diff --git a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs index e488be40708..fc762d5c10e 100644 --- a/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/LogV3/PartitionManagerTests.cs @@ -213,12 +213,12 @@ public FakeReader(Guid? rootPartitionId, Guid? 
rootPartitionTypeId, bool without _results.Add(new SeqReadResult(true, false, rootPartition, 0, 0, 0)); } } - + public void Reposition(long position) { _resultIndex = (int) position; } - public SeqReadResult TryReadNext() { + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { _readCount++; if(_resultIndex < _results.Count) @@ -227,15 +227,15 @@ public SeqReadResult TryReadNext() { return SeqReadResult.Failure; } - public SeqReadResult TryReadPrev() { + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { throw new NotImplementedException(); } - public bool ExistsAt(long position) { + public bool ExistsAt(long position, ITransactionFileTracker tracker) { return true; } } diff --git a/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs b/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs new file mode 100644 index 00000000000..10f4fe8af06 --- /dev/null +++ b/src/EventStore.Core.XUnit.Tests/Messages/ITransactionFileTrackerFactoryExtensionsTests.cs @@ -0,0 +1,68 @@ +using System; +using System.Security.Claims; +using EventStore.Core.Messages; +using EventStore.Core.Messaging; +using EventStore.Core.Services.UserManagement; +using EventStore.Core.TransactionLog; +using EventStore.Core.TransactionLog.LogRecords; +using Xunit; + +namespace EventStore.Core.XUnit.Tests.Messages; + +public class ITransactionFileTrackerFactoryExtensionsTests { + readonly ITransactionFileTrackerFactory _factory = new FakeFactory(); + + [Fact] + public void can_get_for_username() { + var tracker = _factory.For(SystemAccounts.SystemName) as FakeTracker; + Assert.Equal("system", tracker.Username); + } + + [Fact] + public void can_get_for_claims_principal() { + var tracker = _factory.For(SystemAccounts.System) as FakeTracker; + Assert.Equal("system", tracker.Username); + } + + [Fact] + public void can_get_for_request_message() { + var tracker = _factory.For(new FakeReadRequest(SystemAccounts.System)) as FakeTracker; + Assert.Equal("system", tracker.Username); + } + + [Fact] + public void can_get_for_null_username() { + var tracker = _factory.For((string)null) as FakeTracker; + Assert.Equal("anonymous", tracker.Username); + } + + [Fact] + public void can_get_for_null_claims_principal() { + var tracker = _factory.For((ClaimsPrincipal)null) as FakeTracker; + Assert.Equal("anonymous", tracker.Username); + } + + [Fact] + public void can_get_for_null_request_message() { + var tracker = _factory.For((FakeReadRequest)null) as FakeTracker; + Assert.Equal("anonymous", tracker.Username); + } + + class FakeFactory : ITransactionFileTrackerFactory { + public ITransactionFileTracker GetOrAdd(string name) => new FakeTracker(name); + } + + record FakeTracker(string Username) : ITransactionFileTracker { + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { + } + + public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { + } + } + + class FakeReadRequest : ClientMessage.ReadRequestMessage { + public FakeReadRequest(ClaimsPrincipal user) : + base(Guid.NewGuid(), Guid.NewGuid(), IEnvelope.NoOp, user, expires: null) { + } + } +} diff --git a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs 
b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs index a82db9413d6..d57293e5691 100644 --- a/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs +++ b/src/EventStore.Core.XUnit.Tests/Scavenge/Infrastructure/Scenario.cs @@ -247,6 +247,7 @@ private async Task RunInternalAsync( } var hasher = new CompositeHasher(lowHasher, highHasher); + var tracker = ITransactionFileTracker.NoOp; var tableIndex = new TableIndex( directory: indexPath, @@ -254,7 +255,7 @@ private async Task RunInternalAsync( highHasher: highHasher, emptyStreamId: logFormat.EmptyStreamId, memTableFactory: () => new HashListMemTable(PTableVersions.IndexV4, maxSize: 200), - tfReaderFactory: () => new TFReaderLease(readerPool), + tfReaderFactory: _ => new TFReaderLease(readerPool, tracker), ptableVersion: PTableVersions.IndexV4, maxAutoMergeIndexLevel: int.MaxValue, pTableMaxReaderCount: ESConsts.PTableInitialReaderCount, @@ -286,6 +287,7 @@ private async Task RunInternalAsync( indexCheckpoint: dbResult.Db.Config.IndexCheckpoint, indexStatusTracker: new IndexStatusTracker.NoOp(), indexTracker: new IndexTracker.NoOp(), + tfTrackers: ITransactionFileTrackerFactory.NoOp, cacheTracker: new CacheHitsMissesTracker.NoOp()); readIndex.IndexCommitter.Init(dbResult.Db.Config.WriterCheckpoint.Read()); @@ -316,9 +318,10 @@ private async Task RunInternalAsync( metastreamLookup, logFormat.StreamIdConverter, dbResult.Db.Config.ReplicationCheckpoint, + tracker, dbConfig.ChunkSize); - var indexReader = new IndexReaderForAccumulator(readIndex); + var indexReader = new IndexReaderForAccumulator(readIndex, tracker); var accumulatorMetastreamLookup = new AdHocMetastreamLookupInterceptor( metastreamLookup, @@ -331,8 +334,9 @@ private async Task RunInternalAsync( var calculatorIndexReader = new AdHocIndexReaderInterceptor( new IndexReaderForCalculator( readIndex, - () => new TFReaderLease(readerPool), - scavengeState.LookupUniqueHashUser), + () => new TFReaderLease(readerPool, tracker), + scavengeState.LookupUniqueHashUser, + tracker), (f, handle, from, maxCount, x) => { if (_calculatingCancellationTrigger != null) if ((handle.Kind == StreamHandle.Kind.Hash && handle.StreamHash == hasher.Hash(_calculatingCancellationTrigger)) || @@ -402,7 +406,8 @@ private async Task RunInternalAsync( new ChunkManagerForExecutor( logger, dbResult.Db.Manager, - dbConfig), + dbConfig, + tracker), Tracer), chunkSize: dbConfig.ChunkSize, unsafeIgnoreHardDeletes: _unsafeIgnoreHardDeletes, @@ -413,13 +418,13 @@ private async Task RunInternalAsync( IChunkMerger chunkMerger = new ChunkMerger( logger: logger, mergeChunks: _mergeChunks, - new OldScavengeChunkMergerBackend(logger, dbResult.Db), + new OldScavengeChunkMergerBackend(logger, dbResult.Db, tracker), throttle: throttle); IIndexExecutor indexExecutor = new IndexExecutor( logger: logger, indexScavenger: cancellationWrappedIndexScavenger, - streamLookup: new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool)), + streamLookup: new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool, tracker)), unsafeIgnoreHardDeletes: _unsafeIgnoreHardDeletes, restPeriod: restPeriod, throttle: throttle); @@ -574,10 +579,10 @@ protected static void CheckRecords(ILogRecord[][] expected, DbResult actual) { var chunk = actual.Db.Manager.GetChunk(i); var chunkRecords = new List(); - var result = chunk.TryReadFirst(); + var result = chunk.TryReadFirst(ITransactionFileTracker.NoOp); while (result.Success) { chunkRecords.Add(result.LogRecord); - result = 
chunk.TryReadClosestForward((int)result.NextPosition); + result = chunk.TryReadClosestForward((int)result.NextPosition, ITransactionFileTracker.NoOp); } Assert.True( @@ -640,7 +645,7 @@ private static void CheckIndex( streamId: streamId, fromEventNumber: eventNumber, maxCount: 1, - beforePosition: long.MaxValue) + beforePosition: long.MaxValue, tracker: ITransactionFileTracker.NoOp) : actual.ReadEventInfoForward_NoCollisions( stream: hasher.Hash(streamId), fromEventNumber: eventNumber, @@ -649,7 +654,7 @@ private static void CheckIndex( if (result.EventInfos.Length != 1) { // remember this applies metadata, so is of limited use - var wholeStream = actual.ReadStreamEventsForward($"{streamId}", streamId, fromEventNumber: 0, maxCount: 100); + var wholeStream = actual.ReadStreamEventsForward($"{streamId}", streamId, fromEventNumber: 0, maxCount: 100, tracker: ITransactionFileTracker.NoOp); Assert.True(result.EventInfos.Length == 1, $"Couldn't find {streamId}:{eventNumber} in index."); } @@ -674,7 +679,7 @@ private static void CheckIndex( streamId: streamId, fromEventNumber: 0, maxCount: 1000, - beforePosition: long.MaxValue) + beforePosition: long.MaxValue, tracker: ITransactionFileTracker.NoOp) : actual.ReadEventInfoForward_NoCollisions( stream: hasher.Hash(streamId), fromEventNumber: 0, @@ -714,7 +719,7 @@ private void EmptyRequestedChunks(TFChunkDb db) { initialReaderCount: 1, maxReaderCount: 1, reduceFileCachePressure: false, - new TFChunkTracker.NoOp()); + tracker: ITransactionFileTracker.NoOp); newChunk.CompleteScavenge(null); diff --git a/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs b/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs index 09225c3c4df..75eefc1a5ca 100644 --- a/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs +++ b/src/EventStore.Core.XUnit.Tests/Telemetry/TelemetryServiceTests.cs @@ -7,6 +7,7 @@ using EventStore.Core.Services.TimerService; using EventStore.Core.Telemetry; using EventStore.Core.Tests.TransactionLog; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using Xunit; @@ -33,6 +34,7 @@ public TelemetryServiceTests() { new EnvelopePublisher(new ChannelEnvelope(channel)), _sink, new InMemoryCheckpoint(0), + ITransactionFileTracker.NoOp, Guid.NewGuid()); } diff --git a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs index 2adef601e37..f830f0765ab 100644 --- a/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs +++ b/src/EventStore.Core.XUnit.Tests/TransactionLog/Chunks/TFChunkTrackerTests.cs @@ -3,6 +3,7 @@ using System.Diagnostics.Metrics; using System.Linq; using EventStore.Core.Metrics; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.LogRecords; using EventStore.Core.XUnit.Tests.Metrics; @@ -11,7 +12,7 @@ namespace EventStore.Core.XUnit.Tests.TransactionLog.Chunks; public class TFChunkTrackerTests : IDisposable { - private readonly TFChunkTracker _sut; + private readonly ITransactionFileTracker _sut; private readonly TestMeterListener _listener; public TFChunkTrackerTests() { @@ -20,10 +21,10 @@ public TFChunkTrackerTests() { var byteMetric = new CounterMetric(meter, "eventstore-io", unit: "bytes"); var eventMetric = new CounterMetric(meter, "eventstore-io", unit: "events"); - var readTag = new KeyValuePair("activity", 
"read"); - _sut = new TFChunkTracker( - readBytes: new CounterSubMetric(byteMetric, new[] {readTag}), - readEvents: new CounterSubMetric(eventMetric, new[] {readTag})); + _sut = new TransactionFileTrackerFactory( + eventMetric: eventMetric, + byteMetric: byteMetric) + .GetOrAdd("alice"); } public void Dispose() { @@ -36,7 +37,7 @@ public void can_observe_prepare_log() { data: new byte[5], meta: new byte[5]); - _sut.OnRead(prepare); + _sut.OnRead(prepare, source: ITransactionFileTracker.Source.File); _listener.Observe(); AssertEventsRead(1); @@ -46,7 +47,7 @@ public void can_observe_prepare_log() { [Fact] public void disregard_system_log() { var system = CreateSystemRecord(); - _sut.OnRead(system); + _sut.OnRead(system, source: ITransactionFileTracker.Source.File); _listener.Observe(); AssertEventsRead(0); @@ -56,7 +57,7 @@ public void disregard_system_log() { [Fact] public void disregard_commit_log() { var system = CreateCommit(); - _sut.OnRead(system); + _sut.OnRead(system, source: ITransactionFileTracker.Source.File); _listener.Observe(); AssertEventsRead(0); @@ -78,15 +79,37 @@ private void AssertMeasurements(string instrumentName, long? expectedValue) { Assert.Collection( actual, m => { + AssertTags(m.Tags, "unknown"); + Assert.Equal(0, m.Value); + }, + m => { + AssertTags(m.Tags, "chunk-cache"); + Assert.Equal(0, m.Value); + }, + m => { + AssertTags(m.Tags, "file"); Assert.Equal(expectedValue, m.Value); - Assert.Collection(m.Tags.ToArray(), t => { - Assert.Equal("activity", t.Key); - Assert.Equal("read", t.Value); - }); }); } } + private void AssertTags(KeyValuePair[] tags, string source) { + Assert.Collection( + tags.ToArray(), + t => { + Assert.Equal("activity", t.Key); + Assert.Equal("read", t.Value); + }, + t => { + Assert.Equal("source", t.Key); + Assert.Equal(source, t.Value); + }, + t => { + Assert.Equal("user", t.Key); + Assert.Equal("alice", t.Value); + }); + + } private static PrepareLogRecord CreatePrepare(byte[] data, byte[] meta) { return new PrepareLogRecord(42, Guid.NewGuid(), Guid.NewGuid(), 42, 42, "tests", null, 42, DateTime.Now, PrepareFlags.Data, "type-test", null, data, meta); diff --git a/src/EventStore.Core/ClusterVNode.cs b/src/EventStore.Core/ClusterVNode.cs index 5eca55bb81a..75fcd23ad18 100644 --- a/src/EventStore.Core/ClusterVNode.cs +++ b/src/EventStore.Core/ClusterVNode.cs @@ -66,6 +66,7 @@ using Microsoft.Data.Sqlite; using Mono.Unix.Native; using ILogger = Serilog.ILogger; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core { public abstract class ClusterVNode { @@ -306,7 +307,7 @@ public ClusterVNode(ClusterVNodeOptions options, metricsConfiguration ??= new(); MetricsBootstrapper.Bootstrap(metricsConfiguration, dbConfig, trackers); - Db = new TFChunkDb(dbConfig, tracker: trackers.TransactionFileTracker); + Db = new TFChunkDb(dbConfig, trackers.TransactionFileTrackers.For(SystemAccounts.System)); TFChunkDbConfig CreateDbConfig( out SystemStatsHelper statsHelper, @@ -596,7 +597,10 @@ TFChunkDbConfig CreateDbConfig( MaxReaderCount = pTableMaxReaderCount, StreamExistenceFilterSize = options.Database.StreamExistenceFilterSize, StreamExistenceFilterCheckpoint = Db.Config.StreamExistenceFilterCheckpoint, - TFReaderLeaseFactory = () => new TFReaderLease(readerPool) + TFReaderLeaseFactory = username => { + var tracker = trackers.TransactionFileTrackers.For(username); + return new TFReaderLease(readerPool, tracker); + } }); ICacheResizer streamInfoCacheResizer; @@ -642,7 +646,10 @@ TFChunkDbConfig CreateDbConfig( logFormat.EmptyStreamId, () 
=> new HashListMemTable(options.IndexBitnessVersion, maxSize: options.Database.MaxMemTableSize * 2), - () => new TFReaderLease(readerPool), + username => { + var tracker = trackers.TransactionFileTrackers.For(username); + return new TFReaderLease(readerPool, tracker); + }, options.IndexBitnessVersion, maxSizeForMemory: options.Database.MaxMemTableSize, maxTablesPerLevel: 2, @@ -680,6 +687,7 @@ TFChunkDbConfig CreateDbConfig( Db.Config.IndexCheckpoint, trackers.IndexStatusTracker, trackers.IndexTracker, + trackers.TransactionFileTrackers, trackers.CacheHitsMissesTracker); _readIndex = readIndex; var writer = new TFChunkWriter(Db); @@ -704,6 +712,7 @@ TFChunkDbConfig CreateDbConfig( logFormat.StreamNameIndex, logFormat.EventTypeIndex, partitionManager, + ITransactionFileTrackerFactory.NoOp, NodeInfo.InstanceId); epochManager.Init(); @@ -734,7 +743,9 @@ TFChunkDbConfig CreateDbConfig( var storageReader = new StorageReaderService(_mainQueue, _mainBus, readIndex, logFormat.SystemStreams, - readerThreadsCount, Db.Config.WriterCheckpoint.AsReadOnly(), inMemReader, _queueStatsManager, + readerThreadsCount, Db.Config.WriterCheckpoint.AsReadOnly(), inMemReader, + trackers.TransactionFileTrackers, + _queueStatsManager, trackers.QueueTrackers); _mainBus.Subscribe(storageReader); @@ -787,7 +798,8 @@ TFChunkDbConfig CreateDbConfig( Db, Db.Config.WriterCheckpoint.AsReadOnly(), Db.Config.ChaserCheckpoint, - Db.Config.OptimizeReadSideCache); + Db.Config.OptimizeReadSideCache, + trackers.TransactionFileTrackers.For(SystemAccounts.SystemChaserName)); var storageChaser = new StorageChaser( _mainQueue, @@ -1123,7 +1135,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _mainBus.Subscribe(subscrQueue.WidenFrom()); _mainBus.Subscribe(subscrQueue.WidenFrom()); - var subscription = new SubscriptionsService(_mainQueue, subscrQueue, readIndex); + var subscription = new SubscriptionsService(_mainQueue, subscrQueue, readIndex, trackers.TransactionFileTrackers); subscrBus.Subscribe(subscription); subscrBus.Subscribe(subscription); subscrBus.Subscribe(subscription); @@ -1182,7 +1194,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var consumerStrategyRegistry = new PersistentSubscriptionConsumerStrategyRegistry(_mainQueue, _mainBus, additionalPersistentSubscriptionConsumerStrategyFactories); var persistentSubscription = new PersistentSubscriptionService(perSubscrQueue, readIndex, psubDispatcher, - _mainQueue, consumerStrategyRegistry); + _mainQueue, consumerStrategyRegistry, trackers.TransactionFileTrackers); perSubscrBus.Subscribe(persistentSubscription); perSubscrBus.Subscribe(persistentSubscription); perSubscrBus.Subscribe(persistentSubscription); @@ -1269,6 +1281,8 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { }, dispose: backend => backend.Dispose()); + var tracker = trackers.TransactionFileTrackers.For(SystemAccounts.SystemScavengeName); + var state = new ScavengeState( logger, longHasher, @@ -1285,8 +1299,9 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { logFormat.Metastreams, logFormat.StreamIdConverter, Db.Config.ReplicationCheckpoint, + tracker, TFConsts.ChunkSize), - index: new IndexReaderForAccumulator(readIndex), + index: new IndexReaderForAccumulator(readIndex, tracker), cancellationCheckPeriod: cancellationCheckPeriod, throttle: throttle); @@ -1294,8 +1309,9 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { logger: logger, new IndexReaderForCalculator( readIndex, - () => new TFReaderLease(readerPool), - state.LookupUniqueHashUser), + () => new TFReaderLease(readerPool, tracker), + 
state.LookupUniqueHashUser, + tracker), chunkSize: TFConsts.ChunkSize, cancellationCheckPeriod: cancellationCheckPeriod, buffer: calculatorBuffer, @@ -1304,7 +1320,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var chunkExecutor = new ChunkExecutor( logger, logFormat.Metastreams, - new ChunkManagerForExecutor(logger, Db.Manager, Db.Config), + new ChunkManagerForExecutor(logger, Db.Manager, Db.Config, tracker), chunkSize: Db.Config.ChunkSize, unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, cancellationCheckPeriod: cancellationCheckPeriod, @@ -1314,13 +1330,13 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { var chunkMerger = new ChunkMerger( logger: logger, mergeChunks: !options.Database.DisableScavengeMerging, - backend: new OldScavengeChunkMergerBackend(logger, db: Db), + backend: new OldScavengeChunkMergerBackend(logger, db: Db, tracker: tracker), throttle: throttle); var indexExecutor = new IndexExecutor( logger, new IndexScavenger(tableIndex), - new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool)), + new ChunkReaderForIndexExecutor(() => new TFReaderLease(readerPool, tracker)), unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, restPeriod: 32_768, throttle: throttle); @@ -1371,6 +1387,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { tableIndex: tableIndex, readIndex: readIndex, metastreams: logFormat.SystemStreams, + tfTracker: trackers.TransactionFileTrackers.For(SystemAccounts.SystemScavengeName), unsafeIgnoreHardDeletes: options.Database.UnsafeIgnoreHardDelete, threads: message.Threads))); } @@ -1403,7 +1420,8 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _mainBus.Subscribe(redactionQueue.WidenFrom()); _mainBus.Subscribe(redactionQueue.WidenFrom()); - var redactionService = new RedactionService(redactionQueue, Db, _readIndex, _switchChunksLock); + var redactionService = new RedactionService(redactionQueue, Db, _readIndex, _switchChunksLock, + trackers.TransactionFileTrackers.For(SystemAccounts.SystemRedactionName)); redactionBus.Subscribe(redactionService); redactionBus.Subscribe(redactionService); redactionBus.Subscribe(redactionService); @@ -1436,6 +1454,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _mainQueue, new TelemetrySink(options.Application.TelemetryOptout), Db.Config.WriterCheckpoint.AsReadOnly(), + trackers.TransactionFileTrackers.For(SystemAccounts.SystemTelemetryName), memberInfo.InstanceId); _mainBus.Subscribe(telemetryService); _mainBus.Subscribe(telemetryService); @@ -1446,6 +1465,7 @@ GossipAdvertiseInfo GetGossipAdvertiseInfo() { _workersHandler, epochManager, options.Cluster.ClusterSize, options.Cluster.UnsafeAllowSurplusNodes, + trackers.TransactionFileTrackers.For(SystemAccounts.SystemReplicationName), _queueStatsManager); AddTask(leaderReplicationService.Task); _mainBus.Subscribe(leaderReplicationService); diff --git a/src/EventStore.Core/ClusterVNodeStartup.cs b/src/EventStore.Core/ClusterVNodeStartup.cs index 3956d2bf8e1..106a575272c 100644 --- a/src/EventStore.Core/ClusterVNodeStartup.cs +++ b/src/EventStore.Core/ClusterVNodeStartup.cs @@ -181,6 +181,7 @@ public IServiceCollection ConfigureServices(IServiceCollection services) => .AddSingleton(_readIndex) .AddSingleton(new Streams(_mainQueue, _readIndex, _maxAppendSize, _writeTimeout, _expiryStrategy, + _trackers.TransactionFileTrackers, _trackers.GrpcTrackers, _authorizationProvider)) .AddSingleton(new PersistentSubscriptions(_mainQueue, _authorizationProvider)) diff --git a/src/EventStore.Core/Index/TableIndex.cs 
b/src/EventStore.Core/Index/TableIndex.cs index b4185264e09..47bbd692c9d 100644 --- a/src/EventStore.Core/Index/TableIndex.cs +++ b/src/EventStore.Core/Index/TableIndex.cs @@ -17,6 +17,7 @@ using ILogger = Serilog.ILogger; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Index { public abstract class TableIndex { @@ -49,7 +50,7 @@ public long PrepareCheckpoint { private readonly byte _ptableVersion; private readonly string _directory; private readonly Func _memTableFactory; - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly IIndexFilenameProvider _fileNameProvider; private readonly IIndexStatusTracker _statusTracker; @@ -79,7 +80,7 @@ public TableIndex(string directory, IHasher highHasher, TStreamId emptyStreamId, Func memTableFactory, - Func tfReaderFactory, + Func tfReaderFactory, byte ptableVersion, int maxAutoMergeIndexLevel, int pTableMaxReaderCount, @@ -310,7 +311,7 @@ private void ReadOffQueue() { Log.Debug("Performing manual index merge."); _isManualMergePending = false; - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(SystemAccounts.SystemName)) { var manualMergeResult = _indexMap.TryManualMerge( (streamId, currentHash) => UpgradeHash(streamId, currentHash), entry => reader.ExistsAt(entry.Position), @@ -361,7 +362,7 @@ private void ReadOffQueue() { _indexMap.SaveToFile(indexmapFile); if (addResult.CanMergeAny) { - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(SystemAccounts.SystemName)) { MergeResult mergeResult; do { mergeResult = _indexMap.TryMergeOneLevel( @@ -464,7 +465,7 @@ private void ScavengeInternal( try { ct.ThrowIfCancellationRequested(); - using (var reader = _tfReaderFactory()) { + using (var reader = _tfReaderFactory(SystemAccounts.SystemScavengeName)) { var indexmapFile = Path.Combine(_directory, IndexMapFilename); Func existsAt = entry => reader.ExistsAt(entry.Position); diff --git a/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs b/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs index 2362f44506a..180eebd3b33 100644 --- a/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs +++ b/src/EventStore.Core/LogAbstraction/LogFormatAbstractor.cs @@ -21,7 +21,7 @@ public record LogFormatAbstractorOptions { public ICheckpoint StreamExistenceFilterCheckpoint { get; init; } public TimeSpan StreamExistenceFilterCheckpointInterval { get; init; } = TimeSpan.FromSeconds(30); public TimeSpan StreamExistenceFilterCheckpointDelay { get; init; } = TimeSpan.FromSeconds(5); - public Func TFReaderLeaseFactory { get; init; } + public Func TFReaderLeaseFactory { get; init; } } public interface ILogFormatAbstractorFactory { diff --git a/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs b/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs index c447889d83c..8282f9bead1 100644 --- a/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs +++ b/src/EventStore.Core/LogV2/LogV2StreamExistenceFilterInitializer.cs @@ -5,6 +5,7 @@ using EventStore.Core.Exceptions; using EventStore.Core.Index; using EventStore.Core.LogAbstraction; +using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; using EventStore.LogCommon; @@ -21,13 +22,13 @@ namespace EventStore.Core.LogV2 { /// of the previous record, which is fine. 
the net effect is an extra record is initialized /// on startup next time. public class LogV2StreamExistenceFilterInitializer : INameExistenceFilterInitializer { - private readonly Func _tfReaderFactory; + private readonly Func _tfReaderFactory; private readonly ITableIndex _tableIndex; protected static readonly ILogger Log = Serilog.Log.ForContext(); public LogV2StreamExistenceFilterInitializer( - Func tfReaderFactory, + Func tfReaderFactory, ITableIndex tableIndex) { Ensure.NotNull(tableIndex, nameof(tableIndex)); @@ -134,7 +135,7 @@ private void InitializeFromLog(INameExistenceFilter filter) { // whether the checkpoint is the pre or post position of the last processed record. var startPosition = filter.CurrentCheckpoint == -1 ? 0 : filter.CurrentCheckpoint; Log.Information("Initializing from log starting at {startPosition:N0}", startPosition); - using var reader = _tfReaderFactory(); + using var reader = _tfReaderFactory(SystemAccounts.SystemName); reader.Reposition(startPosition); while (TryReadNextLogRecord(reader, out var result)) { diff --git a/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs b/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs index be436e67b34..3243cdc0741 100644 --- a/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs +++ b/src/EventStore.Core/LogV3/EventTypeIdToNameFromStandardIndex.cs @@ -2,6 +2,7 @@ using EventStore.Core.Data; using EventStore.Core.LogAbstraction; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.LogV3 { @@ -15,7 +16,7 @@ public EventTypeIdToNameFromStandardIndex(IIndexReader indexReader) { public bool TryGetName(uint eventTypeId, out string name) { var record = _indexReader.ReadPrepare( streamId: LogV3SystemStreams.EventTypesStreamNumber, - eventNumber: EventTypeIdConverter.ToEventNumber(eventTypeId)); + eventNumber: EventTypeIdConverter.ToEventNumber(eventTypeId), tracker: ITransactionFileTracker.NoOp); // noop ok: LogV3 if (record is null) { name = null; @@ -30,7 +31,7 @@ public bool TryGetName(uint eventTypeId, out string name) { } public bool TryGetLastValue(out uint lastValue) { - var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.EventTypesStreamNumber); + var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.EventTypesStreamNumber, ITransactionFileTracker.NoOp); // noop ok: LogV3 var success = ExpectedVersion.NoStream < lastEventNumber && lastEventNumber != EventNumber.DeletedStream; lastValue = EventTypeIdConverter.ToEventTypeId(lastEventNumber); return success; diff --git a/src/EventStore.Core/LogV3/PartitionManager.cs b/src/EventStore.Core/LogV3/PartitionManager.cs index 2fcb41b6327..6080c859d7f 100644 --- a/src/EventStore.Core/LogV3/PartitionManager.cs +++ b/src/EventStore.Core/LogV3/PartitionManager.cs @@ -82,7 +82,7 @@ private void EnsureRootPartitionIsWritten() { private void ReadRootPartition() { SeqReadResult result; _reader.Reposition(0); - while ((result = _reader.TryReadNext()).Success) { + while ((result = _reader.TryReadNext(ITransactionFileTracker.NoOp)).Success) { var rec = result.LogRecord; switch (rec.RecordType) { case LogRecordType.PartitionType: diff --git a/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs b/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs index aab4f6cdfed..9205e6b0824 100644 --- a/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs +++ 
b/src/EventStore.Core/LogV3/StreamIdToNameFromStandardIndex.cs
@@ -2,6 +2,7 @@
 using EventStore.Core.Data;
 using EventStore.Core.LogAbstraction;
 using EventStore.Core.Services.Storage.ReaderIndex;
+using EventStore.Core.TransactionLog;
 using EventStore.Core.TransactionLog.LogRecords;
 using StreamId = System.UInt32;
@@ -21,7 +22,7 @@ public bool TryGetName(StreamId streamId, out string name) {
 			// explicitly create metastreams.
 			var record = _indexReader.ReadPrepare(
 				streamId: LogV3SystemStreams.StreamsCreatedStreamNumber,
-				eventNumber: StreamIdConverter.ToEventNumber(streamId));
+				eventNumber: StreamIdConverter.ToEventNumber(streamId), tracker: ITransactionFileTracker.NoOp); // noop ok: LogV3
 			if (record is null) {
 				name = null;
@@ -36,7 +37,7 @@ public bool TryGetName(StreamId streamId, out string name) {
 		}
 		public bool TryGetLastValue(out StreamId lastValue) {
-			var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.StreamsCreatedStreamNumber);
+			var lastEventNumber = _indexReader.GetStreamLastEventNumber(LogV3SystemStreams.StreamsCreatedStreamNumber, ITransactionFileTracker.NoOp); // noop ok: LogV3
 			var success = ExpectedVersion.NoStream < lastEventNumber && lastEventNumber != EventNumber.DeletedStream;
 			lastValue = StreamIdConverter.ToStreamId(lastEventNumber);
 			return success;
diff --git a/src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs b/src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs
new file mode 100644
index 00000000000..73e23ad59ad
--- /dev/null
+++ b/src/EventStore.Core/Messages/ITransactionFileTrackerFactoryExtensions.cs
@@ -0,0 +1,24 @@
+using System.Security.Claims;
+using EventStore.Core.TransactionLog;
+
+namespace EventStore.Core.Messages;
+
+public static class ITransactionFileTrackerFactoryExtensions {
+	public static ITransactionFileTracker For(
+		this ITransactionFileTrackerFactory factory,
+		string username) =>
+
+		factory.GetOrAdd(username ?? "anonymous");
+
+	public static ITransactionFileTracker For(
+		this ITransactionFileTrackerFactory factory,
+		ClaimsPrincipal user) =>
+
+		factory.For(user?.Identity?.Name);
+
+	public static ITransactionFileTracker For(
+		this ITransactionFileTrackerFactory factory,
+		ClientMessage.ReadRequestMessage msg) =>
+
+		factory.For(msg?.User);
+}
diff --git a/src/EventStore.Core/Messaging/IEnvelope.cs b/src/EventStore.Core/Messaging/IEnvelope.cs
index 7ab44044a81..96fc3a63a37 100644
--- a/src/EventStore.Core/Messaging/IEnvelope.cs
+++ b/src/EventStore.Core/Messaging/IEnvelope.cs
@@ -4,5 +4,10 @@ public interface IEnvelope {
 	}
 	public interface IEnvelope : IEnvelope {
+		static readonly IEnvelope NoOp = new NoOp();
+	}
+
+	file class NoOp : IEnvelope {
+		public void ReplyWith(U message) where U : Message { }
 	}
 }
diff --git a/src/EventStore.Core/MetricsBootstrapper.cs b/src/EventStore.Core/MetricsBootstrapper.cs
index cf8efe7dc5c..55fd8a1c956 100644
--- a/src/EventStore.Core/MetricsBootstrapper.cs
+++ b/src/EventStore.Core/MetricsBootstrapper.cs
@@ -25,7 +25,7 @@ public class Trackers {
 	public GrpcTrackers GrpcTrackers { get; } = new();
 	public QueueTrackers QueueTrackers { get; set; } = new();
 	public GossipTrackers GossipTrackers { get; set; } = new ();
-	public ITransactionFileTracker TransactionFileTracker { get; set; } = new TFChunkTracker.NoOp();
+	public ITransactionFileTrackerFactory TransactionFileTrackers { get; set; } = ITransactionFileTrackerFactory.NoOp;
 	public IIndexTracker IndexTracker { get; set; } = new IndexTracker.NoOp();
 	public IMaxTracker WriterFlushSizeTracker { get; set; } = new MaxTracker.NoOp();
 	public IDurationMaxTracker WriterFlushDurationTracker { get; set; } = new DurationMaxTracker.NoOp();
@@ -113,10 +113,9 @@ public static void Bootstrap(
 		// events
 		if (conf.Events.TryGetValue(Conf.EventTracker.Read, out var readEnabled) && readEnabled) {
-			var readTag = new KeyValuePair("activity", "read");
-			trackers.TransactionFileTracker = new TFChunkTracker(
-				readBytes: new CounterSubMetric(byteMetric, new[] {readTag}),
-				readEvents: new CounterSubMetric(eventMetric, new[] {readTag}));
+			trackers.TransactionFileTrackers = new TransactionFileTrackerFactory(
+				eventMetric: eventMetric,
+				byteMetric: byteMetric);
 		}
 		// from a users perspective an event is written when it is indexed: thats when it can be read.
diff --git a/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs b/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs index 18cc383892a..ca210adafa3 100644 --- a/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs +++ b/src/EventStore.Core/Services/PersistentSubscription/PersistentSubscriptionService.cs @@ -13,6 +13,7 @@ using EventStore.Core.Services.TimerService; using EventStore.Core.Services.UserManagement; using EventStore.Core.Telemetry; +using EventStore.Core.TransactionLog; using ILogger = Serilog.ILogger; using ReadStreamResult = EventStore.Core.Data.ReadStreamResult; @@ -57,6 +58,7 @@ public class PersistentSubscriptionService : private readonly IODispatcher _ioDispatcher; private readonly IPublisher _bus; private readonly PersistentSubscriptionConsumerStrategyRegistry _consumerStrategyRegistry; + private readonly ITransactionFileTracker _tfTracker; private readonly IPersistentSubscriptionCheckpointReader _checkpointReader; private readonly IPersistentSubscriptionStreamReader _streamReader; private PersistentSubscriptionConfig _config = new PersistentSubscriptionConfig(); @@ -67,7 +69,8 @@ public class PersistentSubscriptionService : public PersistentSubscriptionService(IQueuedHandler queuedHandler, IReadIndex readIndex, IODispatcher ioDispatcher, IPublisher bus, - PersistentSubscriptionConsumerStrategyRegistry consumerStrategyRegistry) { + PersistentSubscriptionConsumerStrategyRegistry consumerStrategyRegistry, + ITransactionFileTrackerFactory tfTrackers) { Ensure.NotNull(queuedHandler, "queuedHandler"); Ensure.NotNull(readIndex, "readIndex"); Ensure.NotNull(ioDispatcher, "ioDispatcher"); @@ -77,6 +80,7 @@ public PersistentSubscriptionService(IQueuedHandler queuedHandler, IReadIndex : private readonly TFChunkDb _db; private readonly IReadIndex _readIndex; private readonly SemaphoreSlimLock _switchChunksLock; + private readonly ITransactionFileTracker _tfTracker; private const string NewChunkFileExtension = ".tmp"; @@ -36,7 +38,8 @@ public RedactionService( IQueuedHandler queuedHandler, TFChunkDb db, IReadIndex readIndex, - SemaphoreSlimLock switchChunksLock) { + SemaphoreSlimLock switchChunksLock, + ITransactionFileTracker tfTracker) { Ensure.NotNull(queuedHandler, nameof(queuedHandler)); Ensure.NotNull(db, nameof(db)); Ensure.NotNull(readIndex, nameof(readIndex)); @@ -46,6 +49,7 @@ public RedactionService( _db = db; _readIndex = readIndex; _switchChunksLock = switchChunksLock; + _tfTracker = tfTracker; } public void Handle(RedactionMessage.GetEventPosition message) { @@ -61,7 +65,7 @@ public void Handle(RedactionMessage.GetEventPosition message) { private void GetEventPosition(string streamName, long eventNumber, IEnvelope envelope) { var streamId = _readIndex.GetStreamId(streamName); - var result = _readIndex.ReadEventInfo_KeepDuplicates(streamId, eventNumber); + var result = _readIndex.ReadEventInfo_KeepDuplicates(streamId, eventNumber, _tfTracker); var eventPositions = new EventPosition[result.EventInfos.Length]; @@ -70,7 +74,7 @@ private void GetEventPosition(string streamName, long eventNumber, IEnvelope env var logPos = eventInfo.LogPosition; var chunk = _db.Manager.GetChunkFor(logPos); var localPosition = chunk.ChunkHeader.GetLocalLogPosition(logPos); - var chunkEventOffset = chunk.GetActualRawPosition(localPosition); + var chunkEventOffset = chunk.GetActualRawPosition(localPosition, _tfTracker); // all the events returned by ReadEventInfo_KeepDuplicates() must exist in 
the log // since the log record was read from the chunk to check for hash collisions. @@ -251,7 +255,7 @@ private bool IsValidSwitchChunkRequest(string targetChunkFile, string newChunkFi maxReaderCount: 1, optimizeReadSideCache: false, reduceFileCachePressure: true, - tracker: new TFChunkTracker.NoOp()); + tracker: _tfTracker); } catch (HashValidationException) { failReason = SwitchChunkResult.NewChunkHashInvalid; return false; diff --git a/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs b/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs index 05a7ab888de..483cf8b5101 100644 --- a/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs +++ b/src/EventStore.Core/Services/Replication/LeaderReplicationService.cs @@ -16,6 +16,7 @@ using EventStore.Core.Services.Monitoring.Stats; using EventStore.Core.Services.Storage.EpochManager; using EventStore.Core.Services.Transport.Tcp; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.Chunks.TFChunk; using EventStore.Core.TransactionLog.LogRecords; @@ -62,7 +63,7 @@ public string Name { private readonly IEpochManager _epochManager; private readonly int _clusterSize; private readonly bool _unsafeAllowSurplusNodes; - + private readonly ITransactionFileTracker _tfTracker; private readonly Thread _mainLoopThread; private volatile bool _stop; private readonly QueueStatsCollector _queueStats; @@ -93,6 +94,7 @@ public LeaderReplicationService( IEpochManager epochManager, int clusterSize, bool unsafeAllowSurplusNodes, + ITransactionFileTracker tfTracker, QueueStatsManager queueStatsManager) { Ensure.NotNull(publisher, "publisher"); Ensure.NotEmptyGuid(instanceId, "instanceId"); @@ -108,6 +110,7 @@ public LeaderReplicationService( _epochManager = epochManager; _clusterSize = clusterSize; _unsafeAllowSurplusNodes = unsafeAllowSurplusNodes; + _tfTracker = tfTracker; _queueStats = queueStatsManager.CreateQueueStatsCollector("Leader Replication Service"); _lastRolesAssignmentTimestamp = _stopwatch.Elapsed; @@ -357,7 +360,7 @@ private long SetSubscriptionPosition(ReplicaSubscription sub, Debug.Assert(chunk != null, string.Format( "Chunk for LogPosition {0} (0x{0:X}) is null in LeaderReplicationService! 
Replica: [{1},C:{2},S:{3}]", logPosition, sub.ReplicaEndPoint, sub.ConnectionId, sub.SubscriptionId)); - var bulkReader = chunk.AcquireReader(); + var bulkReader = chunk.AcquireReader(_tfTracker); if (chunk.ChunkHeader.IsScavenged && (chunkId == Guid.Empty || chunkId != chunk.ChunkHeader.ChunkId)) { var chunkStartPos = chunk.ChunkHeader.ChunkStartPosition; if (verbose) { diff --git a/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs b/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs index f4d204604f5..e87143d2f14 100644 --- a/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs +++ b/src/EventStore.Core/Services/Storage/EpochManager/EpochManager.cs @@ -14,6 +14,7 @@ using EventStore.Core.TransactionLog.LogRecords; using ILogger = Serilog.ILogger; using EventStore.LogCommon; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services.Storage.EpochManager { public abstract class Epochmanager { @@ -30,6 +31,7 @@ public class EpochManager : IEpochManager { private readonly INameIndex _streamNameIndex; private readonly INameIndex _eventTypeIndex; private readonly IPartitionManager _partitionManager; + private readonly ITransactionFileTracker _tfTracker; private readonly Guid _instanceId; private readonly object _locker = new object(); @@ -77,6 +79,7 @@ public EpochManager(IPublisher bus, INameIndex streamNameIndex, INameIndex eventTypeIndex, IPartitionManager partitionManager, + ITransactionFileTrackerFactory tfTrackers, Guid instanceId) { Ensure.NotNull(bus, "bus"); Ensure.Nonnegative(cachedEpochCount, "cachedEpochCount"); @@ -99,6 +102,7 @@ public EpochManager(IPublisher bus, _streamNameIndex = streamNameIndex; _eventTypeIndex = eventTypeIndex; _partitionManager = partitionManager; + _tfTracker = tfTrackers.GetOrAdd(SystemAccounts.SystemEpochManagerName); _instanceId = instanceId; } @@ -117,7 +121,7 @@ private void ReadEpochs(int maxEpochCount) { reader.Reposition(_writer.FlushedPosition); SeqReadResult result; - while ((result = reader.TryReadPrev()).Success) { + while ((result = reader.TryReadPrev(_tfTracker)).Success) { var rec = result.LogRecord; if (rec.RecordType != LogRecordType.System || ((ISystemLogRecord)rec).SystemRecordType != SystemRecordType.Epoch) @@ -147,7 +151,7 @@ private void ReadEpochs(int maxEpochCount) { } } private EpochRecord ReadEpochAt(ITransactionFileReader reader, long epochPos) { - var result = reader.TryReadAt(epochPos, couldBeScavenged: false); + var result = reader.TryReadAt(epochPos, couldBeScavenged: false, tracker: _tfTracker); if (!result.Success) throw new Exception($"Could not find Epoch record at LogPosition {epochPos}."); if (result.LogRecord.RecordType != LogRecordType.System) @@ -201,7 +205,7 @@ public EpochRecord GetEpochAfter(int epochNumber, bool throwIfNotFound) { try { epoch = firstEpoch; do { - var result = reader.TryReadAt(epoch.PrevEpochPosition, couldBeScavenged: false); + var result = reader.TryReadAt(epoch.PrevEpochPosition, couldBeScavenged: false, tracker: _tfTracker); if (!result.Success) throw new Exception( $"Could not find Epoch record at LogPosition {epoch.PrevEpochPosition}."); @@ -255,7 +259,7 @@ public bool IsCorrectEpochAt(long epochPosition, int epochNumber, Guid epochId) // epochNumber < _minCachedEpochNumber var reader = _readers.Get(); try { - var res = reader.TryReadAt(epochPosition, couldBeScavenged: false); + var res = reader.TryReadAt(epochPosition, couldBeScavenged: false, tracker: _tfTracker); if (!res.Success || res.LogRecord.RecordType != 
LogRecordType.System) return false; var sysRec = (ISystemLogRecord)res.LogRecord; @@ -381,13 +385,13 @@ bool TryGetExpectedVersionForEpochInformation(EpochRecord epoch, out long expect reader.Reposition(epoch.PrevEpochPosition); // read the epoch - var result = reader.TryReadNext(); + var result = reader.TryReadNext(_tfTracker); if (!result.Success) return false; // read the epoch-information (if there is one) while (true) { - result = reader.TryReadNext(); + result = reader.TryReadNext(_tfTracker); if (!result.Success) return false; diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs index f346950066d..a3cf4985fb9 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/AllReader.cs @@ -13,27 +13,31 @@ public interface IAllReader { /// Returns event records in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// - IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, + ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// IndexReadAllResult FilteredReadAllEventsForward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker); /// /// Returns event records in the reverse sequence they were committed into TF. /// Positions is specified as post-positions (pointer after the end of record). /// - IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, + ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). 
/// IndexReadAllResult FilteredReadAllEventsBackward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker); } public class AllReader : IAllReader { @@ -53,18 +57,20 @@ public AllReader(IIndexBackend backend, IIndexCommitter indexCommitter, INameLoo _eventTypes = eventTypes; } - public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount) { - return ReadAllEventsForwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter); + public IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + return ReadAllEventsForwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter, tracker); } public IndexReadAllResult FilteredReadAllEventsForward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return ReadAllEventsForwardInternal(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return ReadAllEventsForwardInternal(pos, maxCount, maxSearchWindow, eventFilter, tracker); } private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { var records = new List(); var nextPos = pos; // in case we are at position after which there is no commit at all, in that case we have to force @@ -74,7 +80,7 @@ private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, var consideredEventsCount = 0L; var firstCommit = true; var reachedEndOfStream = false; - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { long nextCommitPos = pos.CommitPosition; while (records.Count < maxCount && consideredEventsCount < maxSearchWindow) { if (nextCommitPos > _indexCommitter.LastIndexedPosition) { @@ -181,17 +187,19 @@ private IndexReadAllResult ReadAllEventsForwardInternal(TFPos pos, int maxCount, } } - public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount) { - return ReadAllEventsBackwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter); + public IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, ITransactionFileTracker tracker) { + return ReadAllEventsBackwardInternal(pos, maxCount, int.MaxValue, EventFilter.DefaultAllFilter, tracker); } public IndexReadAllResult FilteredReadAllEventsBackward(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return ReadAllEventsBackwardInternal(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return ReadAllEventsBackwardInternal(pos, maxCount, maxSearchWindow, eventFilter, tracker); } private IndexReadAllResult ReadAllEventsBackwardInternal(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { + IEventFilter eventFilter, + ITransactionFileTracker tracker) { var records = new List(); var nextPos = pos; // in case we are at position after which there is no commit at all, in that case we have to force @@ -201,7 +209,7 @@ private IndexReadAllResult ReadAllEventsBackwardInternal(TFPos pos, int maxCount var consideredEventsCount = 0L; bool firstCommit = true; var reachedEndOfStream = false; - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { long nextCommitPostPos = pos.CommitPosition; while (records.Count < maxCount && 
consideredEventsCount < maxSearchWindow) { reader.Reposition(nextCommitPostPos); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs index 3738285f3df..78c429315af 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IReadIndex.cs @@ -2,6 +2,7 @@ using System.Security.Claims; using EventStore.Core.Data; using EventStore.Core.Messages; +using EventStore.Core.TransactionLog; using EventStore.Core.Util; namespace EventStore.Core.Services.Storage.ReaderIndex { @@ -14,27 +15,29 @@ public interface IReadIndex { /// Returns event records in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// - IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsForward(TFPos pos, int maxCount, ITransactionFileTracker tracker); /// /// Returns event records in the reverse sequence they were committed into TF. /// Positions is specified as post-positions (pointer after the end of record). /// - IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount); + IndexReadAllResult ReadAllEventsBackward(TFPos pos, int maxCount, ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given EventFilter in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// IndexReadAllResult ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker); /// /// Returns event records whose eventType matches the given EventFilter in the sequence they were committed into TF. /// Positions is specified as pre-positions (pointer at the beginning of the record). /// IndexReadAllResult ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker); void Close(); void Dispose(); @@ -48,32 +51,32 @@ public interface IReadIndex : IReadIndex { // - duplicates are removed, keeping only the earliest event in the log // - streamId drives the read, streamName is only for populating on the result. // this was less messy than safely adding the streamName to the EventRecord at some point after construction. 
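		// Illustrative note, not part of this diff: with the tracker parameter added to the reads below,
		// a caller is expected to resolve a per-user tracker once and pass it on every call, e.g.
		//   var tracker = trackers.For(msg);   // ITransactionFileTrackerFactoryExtensions, see above
		//   var result = readIndex.ReadStreamEventsForward(streamName, streamId, 0, 50, tracker);
		// while LogV3 lookups and similar internal paths keep passing ITransactionFileTracker.NoOp.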
- IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber); - IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); - IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); + IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); // ReadEventInfo_KeepDuplicates() : // - deleted events are not filtered out // - duplicates are kept, in ascending order of log position // - next event number is always -1 - IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber); + IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); // ReadEventInfo*Collisions() : // - deleted events are not filtered out // - duplicates are removed, keeping only the earliest event in the log // - only events that are before "beforePosition" in the transaction log are returned - IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); - bool IsStreamDeleted(TStreamId streamId); - long GetStreamLastEventNumber(TStreamId streamId); - long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition); - long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition); - StreamMetadata GetStreamMetadata(TStreamId streamId); - StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId); - TStreamId GetEventStreamIdByTransactionId(long transactionId); + bool IsStreamDeleted(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker); + StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker); + StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId 
streamId, ITransactionFileTracker tracker); + TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker); TStreamId GetStreamId(string streamName); string GetStreamName(TStreamId streamId); diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs index f093bfefdeb..d6357d2a70d 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexBackend.cs @@ -12,7 +12,7 @@ namespace EventStore.Core.Services.Storage.ReaderIndex { public interface IIndexBackend { - TFReaderLease BorrowReader(); + TFReaderLease BorrowReader(ITransactionFileTracker tracker); void SetSystemSettings(SystemSettings systemSettings); SystemSettings GetSystemSettings(); } @@ -48,8 +48,8 @@ public IndexBackend( _streamMetadataCache = streamMetadataCache; } - public TFReaderLease BorrowReader() { - return new TFReaderLease(_readers); + public TFReaderLease BorrowReader(ITransactionFileTracker tracker) { + return new TFReaderLease(_readers, tracker); } public EventNumberCached TryGetStreamLastEventNumber(TStreamId streamId) { diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs index 77cd4c48e8c..ce89bca2868 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexCommitter.cs @@ -48,6 +48,7 @@ public class IndexCommitter : IndexCommitter, IIndexCommitter 0) { if (_additionalCommitChecks && cacheLastEventNumber) { - CheckStreamVersion(streamId, indexEntries[0].Version, commit); + CheckStreamVersion(streamId, indexEntries[0].Version, commit, _tfTracker); CheckDuplicateEvents(streamId, commit, indexEntries, prepares); } @@ -398,7 +401,7 @@ public long Commit(IList> commitedPrepares, bool is if (indexEntries.Count > 0) { if (_additionalCommitChecks && cacheLastEventNumber) { - CheckStreamVersion(streamId, indexEntries[0].Version, null); // TODO AN: bad passing null commit + CheckStreamVersion(streamId, indexEntries[0].Version, null, _tfTracker); // TODO AN: bad passing null commit CheckDuplicateEvents(streamId, null, indexEntries, prepares); // TODO AN: bad passing null commit } @@ -450,7 +453,7 @@ public long Commit(IList> commitedPrepares, bool is } private IEnumerable> GetTransactionPrepares(long transactionPos, long commitPos) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(_tfTracker)) { reader.Reposition(transactionPos); // in case all prepares were scavenged, we should not read past Commit LogPosition @@ -469,11 +472,12 @@ private IEnumerable> GetTransactionPrepares(long tr } } - private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitLogRecord commit) { + private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitLogRecord commit, + ITransactionFileTracker tracker) { if (newEventNumber == EventNumber.DeletedStream) return; - long lastEventNumber = _indexReader.GetStreamLastEventNumber(streamId); + long lastEventNumber = _indexReader.GetStreamLastEventNumber(streamId, tracker); if (newEventNumber != lastEventNumber + 1) { if (Debugger.IsAttached) Debugger.Break(); @@ -487,7 +491,7 @@ private void CheckStreamVersion(TStreamId streamId, long newEventNumber, CommitL private void CheckDuplicateEvents(TStreamId streamId, CommitLogRecord commit, IList> indexEntries, 
IList> prepares) { - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(_tfTracker)) { var entries = _tableIndex.GetRange(streamId, indexEntries[0].Version, indexEntries[indexEntries.Count - 1].Version); foreach (var indexEntry in entries) { @@ -508,7 +512,7 @@ private void CheckDuplicateEvents(TStreamId streamId, CommitLogRecord commit, IL } private SystemSettings GetSystemSettings() { - var res = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, _systemStreams.SettingsStream, -1); + var res = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, _systemStreams.SettingsStream, -1, _tfTracker); return res.Result == ReadEventResult.Success ? DeserializeSystemSettings(res.Record.Data) : null; } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs index 271434fe48c..58ab19f29b2 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexReader.cs @@ -21,27 +21,27 @@ public interface IIndexReader { // streamId drives the read, streamName is only for populating on the result. // this was less messy than safely adding the streamName to the EventRecord at some point after construction - IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber); - IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); - IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount); - StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId); - IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber); - IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventResult ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); + IndexReadStreamResult ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker); + StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition); - IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition); + IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker); + IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, long fromEventNumber, int maxCount, long beforePosition, 
ITransactionFileTracker tracker); /// /// Doesn't filter $maxAge, $maxCount, $tb(truncate before), doesn't check stream deletion, etc. /// - IPrepareLogRecord ReadPrepare(TStreamId streamId, long eventNumber); + IPrepareLogRecord ReadPrepare(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker); - TStreamId GetEventStreamIdByTransactionId(long transactionId); + TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker); - StreamMetadata GetStreamMetadata(TStreamId streamId); - long GetStreamLastEventNumber(TStreamId streamId); - long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition); - long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition); + StreamMetadata GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker); + long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker); } public abstract class IndexReader { @@ -105,11 +105,12 @@ public IndexReader( _skipIndexScanOnRead = skipIndexScanOnRead; } - IndexReadEventResult IIndexReader.ReadEvent(string streamName, TStreamId streamId, long eventNumber) { + IndexReadEventResult IIndexReader.ReadEvent(string streamName, TStreamId streamId, long eventNumber, + ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); if (eventNumber < -1) throw new ArgumentOutOfRangeException("eventNumber"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { return ReadEventInternal(reader, streamName, streamId, eventNumber); } } @@ -154,8 +155,9 @@ private IndexReadEventResult ReadEventInternal(TFReaderLease reader, string stre originalStreamExists: originalStreamExists); } - IPrepareLogRecord IIndexReader.ReadPrepare(TStreamId streamId, long eventNumber) { - using (var reader = _backend.BorrowReader()) { + IPrepareLogRecord IIndexReader.ReadPrepare(TStreamId streamId, long eventNumber, + ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { return ReadPrepareInternal(reader, streamId, eventNumber); } } @@ -216,17 +218,18 @@ protected static IPrepareLogRecord ReadPrepareInternal(TFReaderLease } IndexReadStreamResult IIndexReader. 
- ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return ReadStreamEventsForwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead); + ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return ReadStreamEventsForwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead, tracker); } private IndexReadStreamResult ReadStreamEventsForwardInternal(string streamName, TStreamId streamId, long fromEventNumber, - int maxCount, bool skipIndexScanOnRead) { + int maxCount, bool skipIndexScanOnRead, + ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); Ensure.Nonnegative(fromEventNumber, "fromEventNumber"); Ensure.Positive(maxCount, "maxCount"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { var lastEventNumber = GetStreamLastEventNumberCached(reader, streamId); var metadata = GetStreamMetadataCached(reader, streamId); if (lastEventNumber == EventNumber.DeletedStream) @@ -502,8 +505,8 @@ delegate IEnumerable ReadIndexEntries( (indexReader, streamHandle, reader, startEventNumber, endEventNumber) => indexReader._tableIndex.GetRange(streamHandle, startEventNumber, endEventNumber); - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { - using (var reader = _backend.BorrowReader()) { + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { var result = ReadEventInfoForwardInternal( streamId, reader, @@ -521,8 +524,9 @@ public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, } // note for simplicity skipIndexScanOnRead is always treated as false. see ReadEventInfoInternal - public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - using (var reader = _backend.BorrowReader()) { + public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, + ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { return ReadEventInfoForwardInternal( streamId, reader, @@ -593,16 +597,17 @@ private IndexReadEventInfoResult ReadEventInfoForwardInternal( } IndexReadStreamResult IIndexReader. 
- ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return ReadStreamEventsBackwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead); + ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return ReadStreamEventsBackwardInternal(streamName, streamId, fromEventNumber, maxCount, _skipIndexScanOnRead, tracker); } private IndexReadStreamResult ReadStreamEventsBackwardInternal(string streamName, TStreamId streamId, long fromEventNumber, - int maxCount, bool skipIndexScanOnRead) { + int maxCount, bool skipIndexScanOnRead, + ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); Ensure.Positive(maxCount, "maxCount"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { var lastEventNumber = GetStreamLastEventNumberCached(reader, streamId); var metadata = GetStreamMetadataCached(reader, streamId); if (lastEventNumber == EventNumber.DeletedStream) @@ -661,14 +666,14 @@ private IndexReadStreamResult ReadStreamEventsBackwardInternal(string streamName } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, ITransactionFileTracker tracker) { if (fromEventNumber < 0) - fromEventNumber = GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition); + fromEventNumber = GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, tracker); if (fromEventNumber == ExpectedVersion.NoStream) return new IndexReadEventInfoResult(new EventInfo[] { }, -1); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { return ReadEventInfoBackwardInternal( streamId, reader, @@ -693,10 +698,11 @@ public IndexReadEventInfoResult ReadEventInfoBackward_NoCollisions( Func getStreamId, long fromEventNumber, int maxCount, - long beforePosition) { + long beforePosition, + ITransactionFileTracker tracker) { if (fromEventNumber < 0) - fromEventNumber = GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition); + fromEventNumber = GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, tracker); if (fromEventNumber == ExpectedVersion.NoStream) return new IndexReadEventInfoResult(new EventInfo[] { }, -1); @@ -800,16 +806,16 @@ private EventInfo[] ReadEventInfoInternal( return result; } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { Ensure.Nonnegative(transactionId, "transactionId"); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { var res = ReadPrepareInternal(reader, transactionId); return res == null ? default : res.EventStreamId; } } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { - using (var reader = _backend.BorrowReader()) { + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { var sysSettings = _backend.GetSystemSettings() ?? 
SystemSettings.Default; StreamAcl acl; StreamAcl sysAcl; @@ -828,16 +834,16 @@ public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { } } - long IIndexReader.GetStreamLastEventNumber(TStreamId streamId) { + long IIndexReader.GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamLastEventNumberCached(reader, streamId); } } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, reader); } } @@ -858,8 +864,9 @@ bool IsForThisStream(IndexEntry indexEntry) { // gets the last event number before beforePosition for the given stream hash. can assume that // the hash does not collide with anything before beforePosition. - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { - using (var reader = _backend.BorrowReader()) { + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, + ITransactionFileTracker tracker) { + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, reader); } } @@ -892,9 +899,9 @@ bool IsForThisStream(IndexEntry indexEntry) { return entry.Version; } - StreamMetadata IIndexReader.GetStreamMetadata(TStreamId streamId) { + StreamMetadata IIndexReader.GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { Ensure.Valid(streamId, _validator); - using (var reader = _backend.BorrowReader()) { + using (var reader = _backend.BorrowReader(tracker)) { return GetStreamMetadataCached(reader, streamId); } } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs index b52ef7dd05f..c4cd728120a 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/IndexWriter.cs @@ -68,6 +68,7 @@ public long NotCachedTransInfo { private readonly INameLookup _streamNames; private readonly ISystemStreamLookup _systemStreams; private readonly TStreamId _emptyStreamId; + private readonly ITransactionFileTracker _tracker; private readonly IStickyLRUCache> _transactionInfoCache = new StickyLRUCache>(ESConsts.TransactionMetadataCacheCapacity); @@ -94,6 +95,7 @@ public IndexWriter( INameLookup streamNames, ISystemStreamLookup systemStreams, TStreamId emptyStreamId, + ITransactionFileTracker tfTracker, ISizer inMemorySizer) { Ensure.NotNull(indexBackend, "indexBackend"); Ensure.NotNull(indexReader, "indexReader"); @@ -110,6 +112,7 @@ public IndexWriter( _streamNames = streamNames; _systemStreams = systemStreams; _emptyStreamId = emptyStreamId; + _tracker = tfTracker; } public void Reset() { @@ -123,7 +126,7 @@ public void Reset() { public CommitCheckResult CheckCommitStartingAt(long transactionPosition, long commitPosition) { TStreamId streamId; long expectedVersion; - using (var reader = _indexBackend.BorrowReader()) { + using (var reader = _indexBackend.BorrowReader(_tracker)) { 
try { var prepare = GetPrepare(reader, transactionPosition); if (prepare == null) { @@ -213,11 +216,11 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte if(first) /*no data in transaction*/ return new CommitCheckResult(CommitDecision.Ok, streamId, curVersion, -1, -1, IsSoftDeleted(streamId)); else{ - var isReplicated = _indexReader.GetStreamLastEventNumber(streamId) >= endEventNumber; + var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, _tracker) >= endEventNumber; //TODO(clc): the new index should hold the log positions removing this read //n.b. the index will never have the event in the case of NotReady as it only committed records are indexed //in that case the position will need to come from the pre-index - var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, endEventNumber); + var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, endEventNumber, _tracker); var logPos = idempotentEvent.Result == ReadEventResult.Success ? idempotentEvent.Record.LogPosition : -1; if(isReplicated) @@ -238,7 +241,7 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte && prepInfo.EventNumber == eventNumber) continue; - var res = _indexReader.ReadPrepare(streamId, eventNumber); + var res = _indexReader.ReadPrepare(streamId, eventNumber, _tracker); if (res != null && res.EventId == eventId) continue; @@ -257,11 +260,11 @@ public CommitCheckResult CheckCommit(TStreamId streamId, long expecte if(eventNumber == expectedVersion) /* no data in transaction */ return new CommitCheckResult(CommitDecision.WrongExpectedVersion, streamId, curVersion, -1, -1, false); else{ - var isReplicated = _indexReader.GetStreamLastEventNumber(streamId) >= eventNumber; + var isReplicated = _indexReader.GetStreamLastEventNumber(streamId, _tracker) >= eventNumber; //TODO(clc): the new index should hold the log positions removing this read //n.b. the index will never have the event in the case of NotReady as it only committed records are indexed //in that case the position will need to come from the pre-index - var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, eventNumber); + var idempotentEvent = _indexReader.ReadEvent(IndexReader.UnspecifiedStreamName, streamId, eventNumber, _tracker); var logPos = idempotentEvent.Result == ReadEventResult.Success ? 
idempotentEvent.Record.LogPosition : -1; if(isReplicated) @@ -362,7 +365,7 @@ public TransactionInfo GetTransactionInfo(long writerCheckpoint, long private bool GetTransactionInfoUncached(long writerCheckpoint, long transactionId, out TransactionInfo transactionInfo) { - using (var reader = _indexBackend.BorrowReader()) { + using (var reader = _indexBackend.BorrowReader(_tracker)) { reader.Reposition(writerCheckpoint); SeqReadResult result; while ((result = reader.TryReadPrev()).Success) { @@ -424,7 +427,7 @@ public void PurgeNotProcessedTransactions(long checkpoint) { } private IEnumerable> GetTransactionPrepares(long transactionPos, long commitPos) { - using (var reader = _indexBackend.BorrowReader()) { + using (var reader = _indexBackend.BorrowReader(_tracker)) { reader.Reposition(transactionPos); // in case all prepares were scavenged, we should not read past Commit LogPosition @@ -459,7 +462,7 @@ public long GetStreamLastEventNumber(TStreamId streamId) { long lastEventNumber; if (_streamVersions.TryGet(streamId, out lastEventNumber)) return lastEventNumber; - return _indexReader.GetStreamLastEventNumber(streamId); + return _indexReader.GetStreamLastEventNumber(streamId, _tracker); } public StreamMetadata GetStreamMetadata(TStreamId streamId) { @@ -472,7 +475,7 @@ public StreamMetadata GetStreamMetadata(TStreamId streamId) { return m; } - return _indexReader.GetStreamMetadata(streamId); + return _indexReader.GetStreamMetadata(streamId, _tracker); } public RawMetaInfo GetStreamRawMeta(TStreamId streamId) { @@ -481,7 +484,7 @@ public RawMetaInfo GetStreamRawMeta(TStreamId streamId) { StreamMeta meta; if (!_streamRawMetas.TryGet(streamId, out meta)) - meta = new StreamMeta(_indexReader.ReadPrepare(metastreamId, metaLastEventNumber).Data, null); + meta = new StreamMeta(_indexReader.ReadPrepare(metastreamId, metaLastEventNumber, _tracker).Data, null); return new RawMetaInfo(metaLastEventNumber, meta.RawMeta); } diff --git a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs index 46b1e427e03..d603647876a 100644 --- a/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs +++ b/src/EventStore.Core/Services/Storage/ReaderIndex/ReadIndex.cs @@ -10,6 +10,7 @@ using EventStore.Core.LogAbstraction; using EventStore.Core.Messages; using EventStore.Core.Metrics; +using EventStore.Core.Services.UserManagement; using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; @@ -59,6 +60,7 @@ public ReadIndex(IPublisher bus, ICheckpoint indexCheckpoint, IIndexStatusTracker indexStatusTracker, IIndexTracker indexTracker, + ITransactionFileTrackerFactory tfTrackers, ICacheHitsMissesTracker cacheTracker) { Ensure.NotNull(bus, "bus"); @@ -89,37 +91,37 @@ public ReadIndex(IPublisher bus, var eventTypeNames = streamNamesProvider.EventTypes; var streamExistenceFilterInitializer = streamNamesProvider.StreamExistenceFilterInitializer; - _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, sizer); + _indexWriter = new IndexWriter(indexBackend, _indexReader, _streamIds, _streamNames, systemStreams, emptyStreamName, tfTrackers.For(SystemAccounts.SystemWriterName), sizer); _indexCommitter = new IndexCommitter(bus, indexBackend, _indexReader, tableIndex, streamNameIndex, _streamNames, eventTypeIndex, eventTypeNames, systemStreams, streamExistenceFilter, - streamExistenceFilterInitializer, indexCheckpoint, 
indexStatusTracker, indexTracker, additionalCommitChecks); + streamExistenceFilterInitializer, indexCheckpoint, indexStatusTracker, indexTracker, tfTrackers.For(SystemAccounts.SystemIndexCommitterName), additionalCommitChecks); _allReader = new AllReader(indexBackend, _indexCommitter, _streamNames, eventTypeNames); RegisterHitsMisses(cacheTracker); } - IndexReadEventResult IReadIndex.ReadEvent(string streamName, TStreamId streamId, long eventNumber) { - return _indexReader.ReadEvent(streamName, streamId, eventNumber); + IndexReadEventResult IReadIndex.ReadEvent(string streamName, TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { + return _indexReader.ReadEvent(streamName, streamId, eventNumber, tracker); } - IndexReadStreamResult IReadIndex.ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return _indexReader.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount); + IndexReadStreamResult IReadIndex.ReadStreamEventsForward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return _indexReader.ReadStreamEventsForward(streamName, streamId, fromEventNumber, maxCount, tracker); } - IndexReadStreamResult IReadIndex.ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount) { - return _indexReader.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount); + IndexReadStreamResult IReadIndex.ReadStreamEventsBackward(string streamName, TStreamId streamId, long fromEventNumber, int maxCount, ITransactionFileTracker tracker) { + return _indexReader.ReadStreamEventsBackward(streamName, streamId, fromEventNumber, maxCount, tracker); } TStreamId IReadIndex.GetStreamId(string streamName) { return _streamIds.LookupValue(streamName); } - public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber) { - return _indexReader.ReadEventInfo_KeepDuplicates(streamId, eventNumber); + public IndexReadEventInfoResult ReadEventInfo_KeepDuplicates(TStreamId streamId, long eventNumber, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfo_KeepDuplicates(streamId, eventNumber, tracker); } - public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoForward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition); + public IndexReadEventInfoResult ReadEventInfoForward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfoForward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, tracker); } public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, long fromEventNumber, int maxCount, long beforePosition) { @@ -127,63 +129,67 @@ public IndexReadEventInfoResult ReadEventInfoForward_NoCollisions(ulong stream, } public IndexReadEventInfoResult ReadEventInfoBackward_KnownCollisions(TStreamId streamId, long fromEventNumber, int maxCount, - long beforePosition) { - return _indexReader.ReadEventInfoBackward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition); + long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfoBackward_KnownCollisions(streamId, fromEventNumber, maxCount, beforePosition, tracker); } public IndexReadEventInfoResult 
ReadEventInfoBackward_NoCollisions(ulong stream, Func getStreamId, - long fromEventNumber, int maxCount, long beforePosition) { - return _indexReader.ReadEventInfoBackward_NoCollisions(stream, getStreamId, fromEventNumber, maxCount, beforePosition); + long fromEventNumber, int maxCount, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.ReadEventInfoBackward_NoCollisions(stream, getStreamId, fromEventNumber, maxCount, beforePosition, tracker); } string IReadIndex.GetStreamName(TStreamId streamId) { return _streamNames.LookupName(streamId); } - bool IReadIndex.IsStreamDeleted(TStreamId streamId) { - return _indexReader.GetStreamLastEventNumber(streamId) == EventNumber.DeletedStream; + bool IReadIndex.IsStreamDeleted(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber(streamId, tracker) == EventNumber.DeletedStream; } - long IReadIndex.GetStreamLastEventNumber(TStreamId streamId) { - return _indexReader.GetStreamLastEventNumber(streamId); + long IReadIndex.GetStreamLastEventNumber(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber(streamId, tracker); } - public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition) { - return _indexReader.GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition); + public long GetStreamLastEventNumber_KnownCollisions(TStreamId streamId, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber_KnownCollisions(streamId, beforePosition, tracker); } - public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition) { - return _indexReader.GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition); + public long GetStreamLastEventNumber_NoCollisions(ulong stream, Func getStreamId, long beforePosition, ITransactionFileTracker tracker) { + return _indexReader.GetStreamLastEventNumber_NoCollisions(stream, getStreamId, beforePosition, tracker); } - StreamMetadata IReadIndex.GetStreamMetadata(TStreamId streamId) { - return _indexReader.GetStreamMetadata(streamId); + StreamMetadata IReadIndex.GetStreamMetadata(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetStreamMetadata(streamId, tracker); } - public TStreamId GetEventStreamIdByTransactionId(long transactionId) { - return _indexReader.GetEventStreamIdByTransactionId(transactionId); + public TStreamId GetEventStreamIdByTransactionId(long transactionId, ITransactionFileTracker tracker) { + return _indexReader.GetEventStreamIdByTransactionId(transactionId, tracker); } - IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount) { - return _allReader.ReadAllEventsForward(pos, maxCount); + IndexReadAllResult IReadIndex.ReadAllEventsForward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { + return _allReader.ReadAllEventsForward(pos, maxCount, tracker); } IndexReadAllResult IReadIndex.ReadAllEventsForwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return _allReader.FilteredReadAllEventsForward(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return _allReader.FilteredReadAllEventsForward(pos, maxCount, maxSearchWindow, eventFilter, tracker); } IndexReadAllResult IReadIndex.ReadAllEventsBackwardFiltered(TFPos pos, int maxCount, int maxSearchWindow, - IEventFilter eventFilter) { - return 
_allReader.FilteredReadAllEventsBackward(pos, maxCount, maxSearchWindow, eventFilter); + IEventFilter eventFilter, + ITransactionFileTracker tracker) { + return _allReader.FilteredReadAllEventsBackward(pos, maxCount, maxSearchWindow, eventFilter, tracker); } - IndexReadAllResult IReadIndex.ReadAllEventsBackward(TFPos pos, int maxCount) { - return _allReader.ReadAllEventsBackward(pos, maxCount); + IndexReadAllResult IReadIndex.ReadAllEventsBackward(TFPos pos, int maxCount, + ITransactionFileTracker tracker) { + return _allReader.ReadAllEventsBackward(pos, maxCount, tracker); } - public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId) { - return _indexReader.GetEffectiveAcl(streamId); + public StorageMessage.EffectiveAcl GetEffectiveAcl(TStreamId streamId, ITransactionFileTracker tracker) { + return _indexReader.GetEffectiveAcl(streamId, tracker); } void RegisterHitsMisses(ICacheHitsMissesTracker tracker) { diff --git a/src/EventStore.Core/Services/Storage/StorageReaderService.cs b/src/EventStore.Core/Services/Storage/StorageReaderService.cs index a60957184be..36bfbdedcf1 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderService.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderService.cs @@ -7,6 +7,7 @@ using EventStore.Core.Messaging; using EventStore.Core.Metrics; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using ILogger = Serilog.ILogger; @@ -33,6 +34,7 @@ public StorageReaderService( int threadCount, IReadOnlyCheckpoint writerCheckpoint, IInMemoryStreamReader inMemReader, + ITransactionFileTrackerFactory tfTrackers, QueueStatsManager queueStatsManager, QueueTrackers trackers) { @@ -49,7 +51,7 @@ public StorageReaderService( StorageReaderWorker[] readerWorkers = new StorageReaderWorker[threadCount]; InMemoryBus[] storageReaderBuses = new InMemoryBus[threadCount]; for (var i = 0; i < threadCount; i++) { - readerWorkers[i] = new StorageReaderWorker(bus, readIndex, systemStreams, writerCheckpoint, inMemReader, i); + readerWorkers[i] = new StorageReaderWorker(bus, readIndex, systemStreams, writerCheckpoint, inMemReader, tfTrackers, i); storageReaderBuses[i] = new InMemoryBus("StorageReaderBus", watchSlowMsg: false); storageReaderBuses[i].Subscribe(readerWorkers[i]); storageReaderBuses[i].Subscribe(readerWorkers[i]); diff --git a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs index db5ab5d01ea..ae36c22dc54 100644 --- a/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs +++ b/src/EventStore.Core/Services/Storage/StorageReaderWorker.cs @@ -13,6 +13,9 @@ using EventStore.Core.Services.TimerService; using EventStore.Core.Messaging; using ILogger = Serilog.ILogger; +using EventStore.Core.TransactionLog; +using EventStore.Core.TransactionLog.Chunks; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services.Storage { public abstract class StorageReaderWorker { @@ -38,6 +41,7 @@ public class StorageReaderWorker : private readonly ISystemStreamLookup _systemStreams; private readonly IReadOnlyCheckpoint _writerCheckpoint; private readonly IInMemoryStreamReader _inMemReader; + private readonly ITransactionFileTrackerFactory _trackers; private readonly int _queueId; private static readonly char[] LinkToSeparator = { '@' }; private const int MaxPageSize = 4096; @@ -54,6 +58,7 @@ public StorageReaderWorker( ISystemStreamLookup systemStreams, IReadOnlyCheckpoint 
writerCheckpoint, IInMemoryStreamReader inMemReader, + ITransactionFileTrackerFactory trackers, int queueId) { Ensure.NotNull(publisher, "publisher"); Ensure.NotNull(readIndex, "readIndex"); @@ -66,6 +71,7 @@ public StorageReaderWorker( _writerCheckpoint = writerCheckpoint; _queueId = queueId; _inMemReader = inMemReader; + _trackers = trackers; } void IHandle.Handle(ClientMessage.ReadEvent msg) { @@ -314,7 +320,7 @@ public StorageReaderWorker( msg.Envelope.ReplyWith(new StorageMessage.OperationCancelledMessage(msg.CancellationToken)); return; } - var acl = _readIndex.GetEffectiveAcl(_readIndex.GetStreamId(msg.StreamId)); + var acl = _readIndex.GetEffectiveAcl(_readIndex.GetStreamId(msg.StreamId), _trackers.For(SystemAccounts.SystemName)); msg.Envelope.ReplyWith(new StorageMessage.EffectiveStreamAclResponse(acl)); } @@ -323,7 +329,7 @@ private ClientMessage.ReadEventCompleted ReadEvent(ClientMessage.ReadEvent msg) try { var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(streamName); - var result = _readIndex.ReadEvent(streamName, streamId, msg.EventNumber); + var result = _readIndex.ReadEvent(streamName, streamId, msg.EventNumber, _trackers.For(msg)); var record = result.Result == ReadEventResult.Success && msg.ResolveLinkTos ? ResolveLinkToEvent(result.Record, msg.User, null) : ResolvedEvent.ForUnresolvedEvent(result.Record); @@ -355,15 +361,16 @@ private ClientMessage.ReadStreamEventsForwardCompleted ReadStreamEventsForward( throw new ArgumentException($"Read size too big, should be less than {MaxPageSize} items"); } + var tracker = _trackers.For(msg); var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(msg.EventStreamId); if (msg.ValidationStreamVersion.HasValue && - _readIndex.GetStreamLastEventNumber(streamId) == msg.ValidationStreamVersion) + _readIndex.GetStreamLastEventNumber(streamId, tracker) == msg.ValidationStreamVersion) return NoData(msg, ReadStreamResult.NotModified, lastIndexPosition, msg.ValidationStreamVersion.Value); var result = - _readIndex.ReadStreamEventsForward(streamName, streamId, msg.FromEventNumber, msg.MaxCount); + _readIndex.ReadStreamEventsForward(streamName, streamId, msg.FromEventNumber, msg.MaxCount, tracker); CheckEventsOrder(msg, result); var resolvedPairs = ResolveLinkToEvents(result.Records, msg.ResolveLinkTos, msg.User); if (resolvedPairs == null) @@ -389,16 +396,17 @@ private ClientMessage.ReadStreamEventsBackwardCompleted ReadStreamEventsBackward throw new ArgumentException($"Read size too big, should be less than {MaxPageSize} items"); } + var tracker = _trackers.For(msg); var streamName = msg.EventStreamId; var streamId = _readIndex.GetStreamId(msg.EventStreamId); if (msg.ValidationStreamVersion.HasValue && - _readIndex.GetStreamLastEventNumber(streamId) == msg.ValidationStreamVersion) + _readIndex.GetStreamLastEventNumber(streamId, tracker) == msg.ValidationStreamVersion) return NoData(msg, ReadStreamResult.NotModified, lastIndexedPosition, msg.ValidationStreamVersion.Value); var result = _readIndex.ReadStreamEventsBackward(streamName, streamId, msg.FromEventNumber, - msg.MaxCount); + msg.MaxCount, tracker); CheckEventsOrder(msg, result); var resolvedPairs = ResolveLinkToEvents(result.Records, msg.ResolveLinkTos, msg.User); if (resolvedPairs == null) @@ -435,12 +443,13 @@ private ClientMessage.ReadAllEventsForwardCompleted if (msg.ValidationTfLastCommitPosition == lastIndexedPosition) return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); - var res = 
_readIndex.ReadAllEventsForward(pos, msg.MaxCount); + var tracker = _trackers.For(msg); + var res = _readIndex.ReadAllEventsForward(pos, msg.MaxCount, tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.ReadAllEventsForwardCompleted( msg.CorrelationId, ReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, res.CurrentPos, res.NextPos, res.PrevPos, lastIndexedPosition); @@ -471,12 +480,13 @@ private ClientMessage.ReadAllEventsBackwardCompleted ReadAllEventsBackward( if (msg.ValidationTfLastCommitPosition == lastIndexedPosition) return NoData(msg, ReadAllResult.NotModified, pos, lastIndexedPosition); - var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount); + var tracker = _trackers.For(msg); + var res = _readIndex.ReadAllEventsBackward(pos, msg.MaxCount, tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoData(msg, ReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.ReadAllEventsBackwardCompleted( msg.CorrelationId, ReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, res.CurrentPos, res.NextPos, res.PrevPos, lastIndexedPosition); @@ -509,14 +519,16 @@ private ClientMessage.FilteredReadAllEventsForwardCompleted FilteredReadAllEvent return NoDataForFilteredCommand(msg, FilteredReadAllResult.NotModified, pos, lastIndexedPosition); + var tracker = _trackers.For(msg); var res = _readIndex.ReadAllEventsForwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, - msg.EventFilter); + msg.EventFilter, + tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.FilteredReadAllEventsForwardCompleted( msg.CorrelationId, FilteredReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, @@ -552,14 +564,15 @@ private ClientMessage.FilteredReadAllEventsBackwardCompleted FilteredReadAllEven return NoDataForFilteredCommand(msg, FilteredReadAllResult.NotModified, pos, lastIndexedPosition); + var tracker = _trackers.For(msg); var res = _readIndex.ReadAllEventsBackwardFiltered(pos, msg.MaxCount, msg.MaxSearchWindow, - msg.EventFilter); + msg.EventFilter, tracker); var resolved = ResolveReadAllResult(res.Records, msg.ResolveLinkTos, msg.User); if (resolved == null) return NoDataForFilteredCommand(msg, FilteredReadAllResult.AccessDenied, pos, lastIndexedPosition); - var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream); + var metadata = _readIndex.GetStreamMetadata(_systemStreams.AllStream, tracker); return new ClientMessage.FilteredReadAllEventsBackwardCompleted( msg.CorrelationId, FilteredReadAllResult.Success, null, resolved, metadata, false, msg.MaxCount, @@ -669,7 +682,7 @@ private ResolvedEvent[] ResolveLinkToEvents(EventRecord[]
records, bool resolveL if (long.TryParse(parts[0], out long eventNumber)) { var streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, _trackers.For(user)); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); @@ -770,7 +783,7 @@ public void Handle(StorageMessage.StreamIdFromTransactionIdRequest message) { if (message.CancellationToken.IsCancellationRequested) { message.Envelope.ReplyWith(new StorageMessage.OperationCancelledMessage(message.CancellationToken)); } - var streamId = _readIndex.GetEventStreamIdByTransactionId(message.TransactionId); + var streamId = _readIndex.GetEventStreamIdByTransactionId(message.TransactionId, _trackers.For(SystemAccounts.SystemName)); var streamName = _readIndex.GetStreamName(streamId); message.Envelope.ReplyWith(new StorageMessage.StreamIdFromTransactionIdResponse(streamName)); } diff --git a/src/EventStore.Core/Services/SubscriptionsService.cs b/src/EventStore.Core/Services/SubscriptionsService.cs index 96275247547..9e9e958b16f 100644 --- a/src/EventStore.Core/Services/SubscriptionsService.cs +++ b/src/EventStore.Core/Services/SubscriptionsService.cs @@ -10,6 +10,8 @@ using System.Linq; using EventStore.Core.Util; using ILogger = Serilog.ILogger; +using EventStore.Core.TransactionLog; +using EventStore.Core.Services.UserManagement; namespace EventStore.Core.Services { public enum SubscriptionDropReason { @@ -56,12 +58,15 @@ public class SubscriptionsService : private readonly IEnvelope _busEnvelope; private readonly IQueuedHandler _queuedHandler; private readonly IReadIndex _readIndex; + private readonly ITransactionFileTrackerFactory _tfTrackers; + private readonly ITransactionFileTracker _tfTracker; private static readonly char[] _linkToSeparator = new[] { '@' }; public SubscriptionsService( IPublisher bus, IQueuedHandler queuedHandler, - IReadIndex readIndex) { + IReadIndex readIndex, + ITransactionFileTrackerFactory tfTrackers) { Ensure.NotNull(bus, "bus"); Ensure.NotNull(queuedHandler, "queuedHandler"); @@ -71,6 +76,8 @@ public SubscriptionsService( _busEnvelope = new PublishEnvelope(bus); _queuedHandler = queuedHandler; _readIndex = readIndex; + _tfTrackers = tfTrackers; + _tfTracker = tfTrackers.For(SystemAccounts.SystemSubscriptionsName); } public void Handle(SystemMessage.SystemStart message) { @@ -120,7 +127,7 @@ public void Handle(ClientMessage.SubscribeToStream msg) { if (isInMemoryStream) { lastEventNumber = -1; } else if (!msg.EventStreamId.IsEmptyString()) { - lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId)); + lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), _tfTrackers.For(msg)); } var lastIndexedPos = isInMemoryStream ? -1 : _readIndex.LastIndexedPosition; @@ -140,7 +147,7 @@ public void Handle(ClientMessage.FilteredSubscribeToStream msg) { if (isInMemoryStream) { lastEventNumber = -1; } else if (!msg.EventStreamId.IsEmptyString()) { - lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId)); + lastEventNumber = _readIndex.GetStreamLastEventNumber(_readIndex.GetStreamId(msg.EventStreamId), _tfTrackers.For(msg)); } var lastIndexedPos = isInMemoryStream ? 
-1 : _readIndex.LastIndexedPosition; @@ -342,7 +349,7 @@ private ResolvedEvent ResolveLinkToEvent(EventRecord eventRecord, long commitPos long eventNumber = long.Parse(parts[0]); string streamName = parts[1]; var streamId = _readIndex.GetStreamId(streamName); - var res = _readIndex.ReadEvent(streamName, streamId, eventNumber); + var res = _readIndex.ReadEvent(streamName, streamId, eventNumber, _tfTracker); if (res.Result == ReadEventResult.Success) return ResolvedEvent.ForResolvedLink(res.Record, eventRecord, commitPosition); diff --git a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs index 0b984ddeb42..61fb8346170 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscription.cs @@ -10,6 +10,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using Serilog; namespace EventStore.Core.Services.Transport.Grpc { @@ -25,6 +26,7 @@ public class AllSubscription : IAsyncEnumerator { private readonly bool _requiresLeader; private readonly IReadIndex _readIndex; private readonly ReadReq.Types.Options.Types.UUIDOption _uuidOption; + private readonly ITransactionFileTracker _tracker; private readonly CancellationToken _cancellationToken; private readonly Channel _channel; private readonly SemaphoreSlim _semaphore; @@ -45,6 +47,7 @@ public AllSubscription(IPublisher bus, bool requiresLeader, IReadIndex readIndex, ReadReq.Types.Options.Types.UUIDOption uuidOption, + ITransactionFileTracker tracker, CancellationToken cancellationToken) { if (bus == null) { throw new ArgumentNullException(nameof(bus)); @@ -62,6 +65,7 @@ public AllSubscription(IPublisher bus, _requiresLeader = requiresLeader; _readIndex = readIndex; _uuidOption = uuidOption; + _tracker = tracker; _cancellationToken = cancellationToken; _channel = Channel.CreateBounded(BoundedChannelOptions); _semaphore = new SemaphoreSlim(1, 1); @@ -128,7 +132,7 @@ private void Subscribe(Position? 
startPosition) { var (commitPosition, preparePosition) = startPosition.Value.ToInt64(); try { var indexResult = - _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1); + _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, _tracker); CatchUp(Position.FromInt64(indexResult.NextPos.CommitPosition, indexResult.NextPos.PreparePosition)); } catch (Exception ex) { diff --git a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs index 3e3c1de3702..2d1059e98e4 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Enumerators.AllSubscriptionFiltered.cs @@ -10,6 +10,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using Serilog; using IReadIndex = EventStore.Core.Services.Storage.ReaderIndex.IReadIndex; @@ -27,6 +28,7 @@ public class AllSubscriptionFiltered : IAsyncEnumerator { private readonly bool _requiresLeader; private readonly IReadIndex _readIndex; private readonly ReadReq.Types.Options.Types.UUIDOption _uuidOption; + private readonly ITransactionFileTracker _tfTracker; private readonly uint _maxSearchWindow; private readonly CancellationToken _cancellationToken; private readonly Channel _channel; @@ -53,6 +55,7 @@ public AllSubscriptionFiltered(IPublisher bus, uint? maxSearchWindow, uint checkpointIntervalMultiplier, ReadReq.Types.Options.Types.UUIDOption uuidOption, + ITransactionFileTracker tfTracker, CancellationToken cancellationToken) { if (bus == null) { throw new ArgumentNullException(nameof(bus)); @@ -80,6 +83,7 @@ public AllSubscriptionFiltered(IPublisher bus, _readIndex = readIndex; _maxSearchWindow = maxSearchWindow ?? ReadBatchSize; _uuidOption = uuidOption; + _tfTracker = tfTracker; _cancellationToken = cancellationToken; _subscriptionStarted = 0; _channel = Channel.CreateBounded(BoundedChannelOptions); @@ -151,7 +155,7 @@ private void Subscribe(Position? startPosition) { var (commitPosition, preparePosition) = startPosition.Value.ToInt64(); try { var indexResult = - _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1); + _readIndex.ReadAllEventsForward(new TFPos(commitPosition, preparePosition), 1, _tfTracker); CatchUp(Position.FromInt64(indexResult.NextPos.CommitPosition, indexResult.NextPos.PreparePosition)); } catch (Exception ex) { diff --git a/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs b/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs index 2fcaaed9753..99d54b8cdea 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Streams.Read.cs @@ -3,6 +3,7 @@ using System.Linq; using System.Threading.Tasks; using EventStore.Client.Streams; +using EventStore.Core.Messages; using EventStore.Core.Metrics; using EventStore.Core.Services.Storage.ReaderIndex; using Grpc.Core; @@ -29,6 +30,7 @@ public override async Task Read( var compatibility = options.ControlOption?.Compatibility ?? 
0; var user = context.GetHttpContext().User; + var tfTracker = _tfTrackers.For(user); var requiresLeader = GetRequiresLeader(context.RequestHeaders); var op = streamOptionsCase switch { @@ -170,6 +172,7 @@ public override async Task Read( requiresLeader, _readIndex, options.UuidOption, + tfTracker, context.CancellationToken), (StreamOptionOneofCase.All, CountOptionOneofCase.Subscription, @@ -191,6 +194,7 @@ public override async Task Read( }, request.Options.Filter.CheckpointIntervalMultiplier, options.UuidOption, + tfTracker, context.CancellationToken), _ => throw RpcExceptions.InvalidCombination((streamOptionsCase, countOptionsCase, readDirection, filterOptionsCase)) diff --git a/src/EventStore.Core/Services/Transport/Grpc/Streams.cs b/src/EventStore.Core/Services/Transport/Grpc/Streams.cs index a11fb5f5ccb..ea8add97310 100644 --- a/src/EventStore.Core/Services/Transport/Grpc/Streams.cs +++ b/src/EventStore.Core/Services/Transport/Grpc/Streams.cs @@ -3,6 +3,7 @@ using EventStore.Core.Bus; using EventStore.Core.Metrics; using EventStore.Core.Services.Storage.ReaderIndex; +using EventStore.Core.TransactionLog; using EventStore.Plugins.Authorization; namespace EventStore.Core.Services.Transport.Grpc { @@ -12,6 +13,7 @@ internal partial class Streams : EventStore.Client.Streams.Streams.St private readonly int _maxAppendSize; private readonly TimeSpan _writeTimeout; private readonly IExpiryStrategy _expiryStrategy; + private readonly ITransactionFileTrackerFactory _tfTrackers; private readonly IDurationTracker _readTracker; private readonly IDurationTracker _appendTracker; private readonly IDurationTracker _batchAppendTracker; @@ -24,6 +26,7 @@ internal partial class Streams : EventStore.Client.Streams.Streams.St public Streams(IPublisher publisher, IReadIndex readIndex, int maxAppendSize, TimeSpan writeTimeout, IExpiryStrategy expiryStrategy, + ITransactionFileTrackerFactory tfTrackers, GrpcTrackers trackers, IAuthorizationProvider provider) { @@ -33,6 +36,7 @@ public Streams(IPublisher publisher, IReadIndex readIndex, int maxApp _maxAppendSize = maxAppendSize; _writeTimeout = writeTimeout; _expiryStrategy = expiryStrategy; + _tfTrackers = tfTrackers; _readTracker = trackers[MetricsConfiguration.GrpcMethod.StreamRead]; _appendTracker = trackers[MetricsConfiguration.GrpcMethod.StreamAppend]; _batchAppendTracker = trackers[MetricsConfiguration.GrpcMethod.StreamBatchAppend]; diff --git a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs index bd2b3cb3607..a9d155b4db3 100644 --- a/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs +++ b/src/EventStore.Core/Services/UserManagement/SystemAccounts.cs @@ -4,11 +4,25 @@ namespace EventStore.Core.Services.UserManagement { public class SystemAccounts { private static readonly IReadOnlyList Claims = new[] { - new Claim(ClaimTypes.Name, "system"), - new Claim(ClaimTypes.Role, "system"), - new Claim(ClaimTypes.Role, SystemRoles.Admins), + new Claim(ClaimTypes.Name, "system"), + new Claim(ClaimTypes.Role, "system"), + new Claim(ClaimTypes.Role, SystemRoles.Admins), }; public static readonly ClaimsPrincipal System = new ClaimsPrincipal(new ClaimsIdentity(Claims, "system")); public static readonly ClaimsPrincipal Anonymous = new ClaimsPrincipal(new ClaimsIdentity(new Claim[]{new Claim(ClaimTypes.Anonymous, ""), })); + + // we may want to make the granularity here configurable, but as a starting point we only + separate scavenge, because it's the only part the user has
direct control over + public static readonly string SystemChaserName = "system"; + public static readonly string SystemEpochManagerName = "system"; + public static readonly string SystemName = "system"; + public static readonly string SystemIndexCommitterName = "system"; + public static readonly string SystemPersistentSubscriptionsName = "system"; + public static readonly string SystemRedactionName = "system"; + public static readonly string SystemReplicationName = "system"; + public static readonly string SystemScavengeName = "system-scavenge"; + public static readonly string SystemSubscriptionsName = "system"; + public static readonly string SystemTelemetryName = "system"; + public static readonly string SystemWriterName = "system"; } } diff --git a/src/EventStore.Core/Telemetry/TelemetryService.cs b/src/EventStore.Core/Telemetry/TelemetryService.cs index 4aa34667fda..6daf27aa771 100644 --- a/src/EventStore.Core/Telemetry/TelemetryService.cs +++ b/src/EventStore.Core/Telemetry/TelemetryService.cs @@ -9,6 +9,7 @@ using EventStore.Core.Messages; using EventStore.Core.Messaging; using EventStore.Core.Services.TimerService; +using EventStore.Core.TransactionLog; using EventStore.Core.TransactionLog.Checkpoint; using EventStore.Core.TransactionLog.Chunks; using EventStore.Core.TransactionLog.LogRecords; @@ -29,6 +30,7 @@ public sealed class TelemetryService : IDisposable, private readonly CancellationTokenSource _cts = new(); private readonly IPublisher _publisher; private readonly IReadOnlyCheckpoint _writerCheckpoint; + private readonly ITransactionFileTracker _tfTracker; private readonly DateTime _startTime = DateTime.UtcNow; private readonly Guid _nodeId; private readonly TFChunkManager _manager; @@ -44,12 +46,14 @@ public TelemetryService( IPublisher publisher, ITelemetrySink sink, IReadOnlyCheckpoint writerCheckpoint, + ITransactionFileTracker tfTracker, Guid nodeId) { _manager = manager; _nodeOptions = nodeOptions; _publisher = publisher; _writerCheckpoint = writerCheckpoint; + _tfTracker = tfTracker; _nodeId = nodeId; Task.Run(async () => { try { @@ -178,7 +182,7 @@ private static void OnGossipReceived(IEnvelope envelo private void ReadFirstEpoch() { try { var chunk = _manager.GetChunkFor(0); - var result = chunk.TryReadAt(0, false); + var result = chunk.TryReadAt(0, false, _tfTracker); if (!result.Success) return; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs index f1db909f15e..f22ab832918 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/ReaderWorkItem.cs @@ -1,6 +1,9 @@ +#nullable enable + using System.IO; namespace EventStore.Core.TransactionLog.Chunks.TFChunk { + // ReaderWorkItems are always checked out of a pool and used by one thread at a time internal class ReaderWorkItem { public readonly Stream Stream; public readonly BinaryReader Reader; @@ -11,5 +14,15 @@ public ReaderWorkItem(Stream stream, BinaryReader reader, bool isMemory) { Reader = reader; IsMemory = isMemory; } + + public ITransactionFileTracker Tracker { get; private set; } = ITransactionFileTracker.NoOp; + + public void OnCheckedOut(ITransactionFileTracker tracker) { + Tracker = tracker; + } + + public void OnReturning() { + Tracker = ITransactionFileTracker.NoOp; + } } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs index 
60bbe9e74c1..6858303d80b 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunk.cs @@ -254,7 +254,8 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache, ITransac SetAttributes(_filename, true); CreateReaderStreams(); - var reader = GetReaderWorkItem(); + // no need to track reading the header/footer (currently we only track Prepares read anyway) + var reader = GetReaderWorkItem(ITransactionFileTracker.NoOp); // noop ok, not reading records try { _chunkHeader = ReadHeader(reader.Stream); Log.Debug("Opened completed {chunk} as version {version}", _filename, _chunkHeader.Version); @@ -291,14 +292,14 @@ private void InitCompleted(bool verifyHash, bool optimizeReadSideCache, ITransac } _readSide = _chunkHeader.IsScavenged - ? (IChunkReadSide)new TFChunkReadSideScavenged(this, optimizeReadSideCache, tracker) - : new TFChunkReadSideUnscavenged(this, tracker); + ? (IChunkReadSide)new TFChunkReadSideScavenged(this, optimizeReadSideCache) + : new TFChunkReadSideUnscavenged(this); // do not actually cache now because it is too slow when opening the database _readSide.RequestCaching(); if (verifyHash) - VerifyFileHash(); + VerifyFileHash(tracker); } private void InitNew(ChunkHeader chunkHeader, int fileSize, ITransactionFileTracker tracker) { @@ -319,13 +320,13 @@ private void InitNew(ChunkHeader chunkHeader, int fileSize, ITransactionFileTrac } _readSide = chunkHeader.IsScavenged - ? (IChunkReadSide)new TFChunkReadSideScavenged(this, false, tracker) - : new TFChunkReadSideUnscavenged(this, tracker); + ? (IChunkReadSide)new TFChunkReadSideScavenged(this, false) + : new TFChunkReadSideUnscavenged(this); // Always cache the active chunk // If the chunk is scavenged we will definitely mark it readonly before we are done writing to it. 
if (!chunkHeader.IsScavenged) { - CacheInMemory(); + CacheInMemory(tracker); } } @@ -360,10 +361,10 @@ private void InitOngoing(int writePosition, bool checkSize, ITransactionFileTrac } } - _readSide = new TFChunkReadSideUnscavenged(this, tracker); + _readSide = new TFChunkReadSideUnscavenged(this); // Always cache the active chunk - CacheInMemory(); + CacheInMemory(tracker); } // If one file stream writes to a file, and another file stream happens to have that part of @@ -537,12 +538,12 @@ private void SetAttributes(string filename, bool isReadOnly) { }); } - public void VerifyFileHash() { + public void VerifyFileHash(ITransactionFileTracker tracker) { if (!IsReadOnly) throw new InvalidOperationException("You can't verify hash of not-completed TFChunk."); Log.Debug("Verifying hash for TFChunk '{chunk}'...", _filename); - using (var reader = AcquireReader()) { + using (var reader = AcquireReader(tracker)) { reader.Stream.Seek(0, SeekOrigin.Begin); var stream = reader.Stream; var footer = _chunkFooter; @@ -615,11 +616,11 @@ private static long GetDataPosition(WriterWorkItem workItem) { // (d) raw (byte offset in file, which is actual - header size) // // this method takes (b) and returns (d) - public long GetActualRawPosition(long logicalPosition) { + public long GetActualRawPosition(long logicalPosition, ITransactionFileTracker tracker) { if (logicalPosition < 0) throw new ArgumentOutOfRangeException(nameof(logicalPosition)); - var actualPosition = _readSide.GetActualPosition(logicalPosition); + var actualPosition = _readSide.GetActualPosition(logicalPosition, tracker); if (actualPosition < 0) return -1; @@ -627,7 +628,7 @@ public long GetActualRawPosition(long logicalPosition) { return GetRawPosition(actualPosition); } - public void CacheInMemory() { + public void CacheInMemory(ITransactionFileTracker tracker) { lock (_cachedDataLock) { if (_inMem) return; @@ -643,7 +644,7 @@ public void CacheInMemory() { // we won the right to cache var sw = Stopwatch.StartNew(); try { - BuildCacheArray(); + BuildCacheArray(tracker); } catch (OutOfMemoryException) { Log.Error("CACHING FAILED due to OutOfMemory exception in TFChunk {chunk}.", this); return; @@ -687,8 +688,8 @@ public void CacheInMemory() { } } - private void BuildCacheArray() { - var workItem = AcquireFileReader(); + private void BuildCacheArray(ITransactionFileTracker tracker) { + var workItem = AcquireFileReader(tracker); try { if (workItem.IsMemory) throw new InvalidOperationException( @@ -748,13 +749,13 @@ public void UnCacheFromMemory() { } } - public bool ExistsAt(long logicalPosition) { - return _readSide.ExistsAt(logicalPosition); + public bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker) { + return _readSide.ExistsAt(logicalPosition, tracker); } - public void OptimizeExistsAt() { + public void OptimizeExistsAt(ITransactionFileTracker tracker) { if (!ChunkHeader.IsScavenged) return; - ((TFChunkReadSideScavenged)_readSide).OptimizeExistsAt(); + ((TFChunkReadSideScavenged)_readSide).OptimizeExistsAt(tracker); } public void DeOptimizeExistsAt() { @@ -762,28 +763,28 @@ public void DeOptimizeExistsAt() { ((TFChunkReadSideScavenged)_readSide).DeOptimizeExistsAt(); } - public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - return _readSide.TryReadAt(logicalPosition, couldBeScavenged); + public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, ITransactionFileTracker tracker) { + return _readSide.TryReadAt(logicalPosition, couldBeScavenged, tracker); } - public 
RecordReadResult TryReadFirst() { - return _readSide.TryReadFirst(); + public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { + return _readSide.TryReadFirst(tracker); } - public RecordReadResult TryReadClosestForward(long logicalPosition) { - return _readSide.TryReadClosestForward(logicalPosition); + public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { + return _readSide.TryReadClosestForward(logicalPosition, tracker); } - public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { - return _readSide.TryReadClosestForwardRaw(logicalPosition, getBuffer); + public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker) { + return _readSide.TryReadClosestForwardRaw(logicalPosition, getBuffer, tracker); } - public RecordReadResult TryReadLast() { - return _readSide.TryReadLast(); + public RecordReadResult TryReadLast(ITransactionFileTracker tracker) { + return _readSide.TryReadLast(tracker); } - public RecordReadResult TryReadClosestBackward(long logicalPosition) { - return _readSide.TryReadClosestBackward(logicalPosition); + public RecordReadResult TryReadClosestBackward(long logicalPosition, ITransactionFileTracker tracker) { + return _readSide.TryReadClosestBackward(logicalPosition, tracker); } public RecordWriteResult TryAppend(ILogRecord record) { @@ -1082,7 +1083,13 @@ public void WaitForDestroy(int timeoutMs) { throw new TimeoutException(); } - private ReaderWorkItem GetReaderWorkItem() { + private ReaderWorkItem GetReaderWorkItem(ITransactionFileTracker tracker) { + var item = GetReaderWorkItemImpl(); + item.OnCheckedOut(tracker); + return item; + } + + private ReaderWorkItem GetReaderWorkItemImpl() { if (_selfdestructin54321) throw new FileBeingDeletedException(); @@ -1139,6 +1146,7 @@ private ReaderWorkItem GetReaderWorkItem() { } private void ReturnReaderWorkItem(ReaderWorkItem item) { + item.OnReturning(); if (item.IsMemory) { // we avoid taking the _cachedDataLock here every time because we would be // contending with other reader threads also returning readerworkitems. 
@@ -1178,14 +1186,14 @@ private void ReturnReaderWorkItem(ReaderWorkItem item) { } } - public TFChunkBulkReader AcquireReader() { - if (TryAcquireBulkMemReader(out var reader)) + public TFChunkBulkReader AcquireReader(ITransactionFileTracker tracker) { + if (TryAcquireBulkMemReader(tracker, out var reader)) return reader; - return AcquireFileReader(); + return AcquireFileReader(tracker); } - private TFChunkBulkReader AcquireFileReader() { + private TFChunkBulkReader AcquireFileReader(ITransactionFileTracker tracker) { Interlocked.Increment(ref _fileStreamCount); if (_selfdestructin54321) { if (Interlocked.Decrement(ref _fileStreamCount) == 0) { @@ -1197,7 +1205,7 @@ private TFChunkBulkReader AcquireFileReader() { // if we get here, then we reserved TFChunk for sure so no one should dispose of chunk file // until client returns dedicated reader - return new TFChunkBulkReader(this, GetSequentialReaderFileStream(), isMemory: false); + return new TFChunkBulkReader(this, GetSequentialReaderFileStream(), isMemory: false, tracker); } private Stream GetSequentialReaderFileStream() { @@ -1211,7 +1219,7 @@ private Stream GetSequentialReaderFileStream() { // (a) doesn't block if a file reader would be acceptable instead // (we might be in the middle of caching which could take a while) // (b) _does_ throw if we can't get a memstream and a filestream is not acceptable - private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { + private bool TryAcquireBulkMemReader(ITransactionFileTracker tracker, out TFChunkBulkReader reader) { reader = null; if (IsReadOnly) { @@ -1222,7 +1230,7 @@ private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { return false; try { - return TryCreateBulkMemReader(out reader); + return TryCreateBulkMemReader(tracker, out reader); } finally { Monitor.Exit(_cachedDataLock); } @@ -1230,7 +1238,7 @@ private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { // chunk is not readonly so it should be cached and let us create a mem reader // (but might become readonly at any moment!) - if (TryCreateBulkMemReader(out reader)) + if (TryCreateBulkMemReader(tracker, out reader)) return true; // we couldn't get a memreader, maybe we just became readonly and got uncached. 
@@ -1245,7 +1253,7 @@ private bool TryAcquireBulkMemReader(out TFChunkBulkReader reader) { } // creates a bulk reader over a memstream as long as we are cached - private bool TryCreateBulkMemReader(out TFChunkBulkReader reader) { + private bool TryCreateBulkMemReader(ITransactionFileTracker tracker, out TFChunkBulkReader reader) { lock (_cachedDataLock) { if (_cacheStatus != CacheStatus.Cached) { reader = null; @@ -1257,7 +1265,7 @@ private bool TryCreateBulkMemReader(out TFChunkBulkReader reader) { Interlocked.Increment(ref _memStreamCount); var stream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength); - reader = new TFChunkBulkReader(this, stream, isMemory: true); + reader = new TFChunkBulkReader(this, stream, isMemory: true, tracker); return true; } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs index 24e433e29ac..baa1cb662dd 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunk/TFChunkReadSide.cs @@ -14,18 +14,18 @@ public interface IChunkReadSide { void RequestCaching(); void Uncache(); - bool ExistsAt(long logicalPosition); - long GetActualPosition(long logicalPosition); - RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged); - RecordReadResult TryReadFirst(); - RecordReadResult TryReadClosestForward(long logicalPosition); - RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer); - RecordReadResult TryReadLast(); - RecordReadResult TryReadClosestBackward(long logicalPosition); + bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker); + long GetActualPosition(long logicalPosition, ITransactionFileTracker tracker); + RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, ITransactionFileTracker tracker); + RecordReadResult TryReadFirst(ITransactionFileTracker tracker); + RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker); + RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker); + RecordReadResult TryReadLast(ITransactionFileTracker tracker); + RecordReadResult TryReadClosestBackward(long logicalPosition, ITransactionFileTracker tracker); } private class TFChunkReadSideUnscavenged : TFChunkReadSide, IChunkReadSide { - public TFChunkReadSideUnscavenged(TFChunk chunk, ITransactionFileTracker tracker) : base(chunk, tracker) { + public TFChunkReadSideUnscavenged(TFChunk chunk) : base(chunk) { if (chunk.ChunkHeader.IsScavenged) throw new ArgumentException("Scavenged TFChunk passed into unscavenged chunk read side."); } @@ -38,11 +38,11 @@ public void Uncache() { // do nothing } - public bool ExistsAt(long logicalPosition) { + public bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker) { return logicalPosition >= 0 && logicalPosition < Chunk.LogicalDataSize; } - public long GetActualPosition(long logicalPosition) { + public long GetActualPosition(long logicalPosition, ITransactionFileTracker tracker) { Ensure.Nonnegative(logicalPosition, nameof(logicalPosition)); if (logicalPosition >= Chunk.LogicalDataSize) @@ -51,8 +51,8 @@ public long GetActualPosition(long logicalPosition) { return logicalPosition; } - public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - var workItem = Chunk.GetReaderWorkItem(); + public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, 
ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { if (logicalPosition >= Chunk.LogicalDataSize) { _log.Warning( @@ -70,12 +70,12 @@ public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { } } - public RecordReadResult TryReadFirst() { - return TryReadClosestForward(0); + public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { + return TryReadClosestForward(0, tracker); } - public RecordReadResult TryReadClosestForward(long logicalPosition) { - var workItem = Chunk.GetReaderWorkItem(); + public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { if (logicalPosition >= Chunk.LogicalDataSize) return RecordReadResult.Failure; @@ -90,8 +90,8 @@ public RecordReadResult TryReadClosestForward(long logicalPosition) { } } - public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { - var workItem = Chunk.GetReaderWorkItem(); + public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { if (logicalPosition >= Chunk.LogicalDataSize) return RawReadResult.Failure; @@ -106,12 +106,12 @@ public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func Chunk.LogicalDataSize) @@ -142,8 +142,8 @@ private bool CacheIsOptimized { get { return _optimizeCache && _logPositionsBloomFilter != null; } } - public TFChunkReadSideScavenged(TFChunk chunk, bool optimizeCache, ITransactionFileTracker tracker) - : base(chunk, tracker) { + public TFChunkReadSideScavenged(TFChunk chunk, bool optimizeCache) + : base(chunk) { _optimizeCache = optimizeCache; if (!chunk.ChunkHeader.IsScavenged) throw new ArgumentException(string.Format("Chunk provided is not scavenged: {0}", chunk)); @@ -191,9 +191,9 @@ private Midpoint[] GetOrCreateMidPoints(ReaderWorkItem workItem) { } } - public void OptimizeExistsAt() { + public void OptimizeExistsAt(ITransactionFileTracker tracker) { if (_optimizeCache && _logPositionsBloomFilter == null) - _logPositionsBloomFilter = PopulateBloomFilter(); + _logPositionsBloomFilter = PopulateBloomFilter(tracker); } public void DeOptimizeExistsAt() { @@ -201,7 +201,7 @@ public void DeOptimizeExistsAt() { _logPositionsBloomFilter = null; } - private InMemoryBloomFilter PopulateBloomFilter() { + private InMemoryBloomFilter PopulateBloomFilter(ITransactionFileTracker tracker) { var mapCount = Chunk.ChunkFooter.MapCount; if (mapCount <= 0) return null; @@ -227,7 +227,7 @@ private InMemoryBloomFilter PopulateBloomFilter() { ReaderWorkItem workItem = null; try { - workItem = Chunk.GetReaderWorkItem(); + workItem = Chunk.GetReaderWorkItem(tracker); foreach (var posMap in ReadPosMap(workItem, 0, mapCount)) { bf.Add(posMap.LogPos); @@ -305,11 +305,11 @@ private IEnumerable ReadPosMap(ReaderWorkItem workItem, long index, int } } - public bool ExistsAt(long logicalPosition) { + public bool ExistsAt(long logicalPosition, ITransactionFileTracker tracker) { if (CacheIsOptimized) return MayExistAt(logicalPosition); - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateExactPosition(workItem, logicalPosition); return actualPosition >= 0 && actualPosition < Chunk.PhysicalDataSize; @@ -323,10 +323,10 @@ public bool MayExistAt(long logicalPosition) { return _logPositionsBloomFilter.MightContain(logicalPosition); } - 
public long GetActualPosition(long logicalPosition) { + public long GetActualPosition(long logicalPosition, ITransactionFileTracker tracker) { Ensure.Nonnegative(logicalPosition, nameof(logicalPosition)); - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(tracker); try { return TranslateExactPosition(workItem, logicalPosition); } finally { @@ -334,8 +334,8 @@ public long GetActualPosition(long logicalPosition) { } } - public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged) { - var workItem = Chunk.GetReaderWorkItem(); + public RecordReadResult TryReadAt(long logicalPosition, bool couldBeScavenged, ITransactionFileTracker tracker) { + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateExactPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) { @@ -391,15 +391,15 @@ private int TranslateExactWithMidpoints(ReaderWorkItem workItem, Midpoint[] midp return TranslateExactWithoutMidpoints(workItem, pos, recordRange.Lower, recordRange.Upper); } - public RecordReadResult TryReadFirst() { - return TryReadClosestForward(0); + public RecordReadResult TryReadFirst(ITransactionFileTracker tracker) { + return TryReadClosestForward(0, tracker); } - public RecordReadResult TryReadClosestForward(long logicalPosition) { + public RecordReadResult TryReadClosestForward(long logicalPosition, ITransactionFileTracker tracker) { if (Chunk.ChunkFooter.MapCount == 0) return RecordReadResult.Failure; - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateClosestForwardPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) @@ -416,11 +416,11 @@ public RecordReadResult TryReadClosestForward(long logicalPosition) { } } - public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer) { + public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func getBuffer, ITransactionFileTracker tracker) { if (Chunk.ChunkFooter.MapCount == 0) return RawReadResult.Failure; - var workItem = Chunk.GetReaderWorkItem(); + var workItem = Chunk.GetReaderWorkItem(tracker); try { var actualPosition = TranslateClosestForwardPosition(workItem, logicalPosition); if (actualPosition == -1 || actualPosition >= Chunk.PhysicalDataSize) @@ -447,15 +447,15 @@ public RawReadResult TryReadClosestForwardRaw(long logicalPosition, Func(); - protected readonly ITransactionFileTracker _tracker; - protected TFChunkReadSide(TFChunk chunk, ITransactionFileTracker tracker) { + protected TFChunkReadSide(TFChunk chunk) { Ensure.NotNull(chunk, "chunk"); Chunk = chunk; - _tracker = tracker; } private bool ValidateRecordPosition(long actualPosition) { @@ -625,7 +623,9 @@ record = null; ValidateRecordLength(length, actualPosition); record = LogRecord.ReadFrom(workItem.Reader, length); - _tracker.OnRead(record); + workItem.Tracker.OnRead(record, Chunk.IsCached ? + ITransactionFileTracker.Source.ChunkCache : + ITransactionFileTracker.Source.File); int suffixLength = workItem.Reader.ReadInt32(); ValidateSuffixLength(length, suffixLength, actualPosition); @@ -706,7 +706,9 @@ record = null; } record = LogRecord.ReadFrom(workItem.Reader, length); - _tracker.OnRead(record); + workItem.Tracker.OnRead(record, Chunk.IsCached ? 
+ ITransactionFileTracker.Source.ChunkCache : + ITransactionFileTracker.Source.File); return true; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs index 8fdf7b9a55c..cc49d6be5dd 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkBulkReader.cs @@ -16,15 +16,18 @@ internal Stream Stream { private readonly TFChunk.TFChunk _chunk; private readonly Stream _stream; + private readonly ITransactionFileTracker _tfTracker; private bool _disposed; public bool IsMemory { get; init; } - internal TFChunkBulkReader(TFChunk.TFChunk chunk, Stream streamToUse, bool isMemory) { + internal TFChunkBulkReader(TFChunk.TFChunk chunk, Stream streamToUse, bool isMemory, + ITransactionFileTracker tfTracker) { Ensure.NotNull(chunk, "chunk"); Ensure.NotNull(streamToUse, "stream"); _chunk = chunk; _stream = streamToUse; IsMemory = isMemory; + _tfTracker = tfTracker; } ~TFChunkBulkReader() { @@ -62,6 +65,11 @@ public BulkReadResult ReadNextRawBytes(int count, byte[] buffer) { var oldPos = (int)_stream.Position; int bytesRead = _stream.Read(buffer, 0, count); + + _tfTracker.OnRead(bytesRead, IsMemory + ? ITransactionFileTracker.Source.ChunkCache + : ITransactionFileTracker.Source.File); + return new BulkReadResult(oldPos, bytesRead, isEof: _stream.Length == _stream.Position); } @@ -80,6 +88,11 @@ public BulkReadResult ReadNextDataBytes(int count, byte[] buffer) { Debug.Assert(toRead >= 0); _stream.Position = _stream.Position; // flush read buffer int bytesRead = _stream.Read(buffer, 0, toRead); + + _tfTracker.OnRead(bytesRead, IsMemory + ? ITransactionFileTracker.Source.ChunkCache + : ITransactionFileTracker.Source.File); + return new BulkReadResult(oldPos, bytesRead, isEof: _chunk.IsReadOnly && oldPos + bytesRead == _chunk.PhysicalDataSize); diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs index fcff4c9eff8..aeae97b9e14 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkChaser.cs @@ -9,15 +9,18 @@ public ICheckpoint Checkpoint { } private readonly ICheckpoint _chaserCheckpoint; + private readonly ITransactionFileTracker _tfTracker; private readonly TFChunkReader _reader; public TFChunkChaser(TFChunkDb db, IReadOnlyCheckpoint writerCheckpoint, ICheckpoint chaserCheckpoint, - bool optimizeReadSideCache) { + bool optimizeReadSideCache, + ITransactionFileTracker tfTracker) { Ensure.NotNull(db, "dbConfig"); Ensure.NotNull(writerCheckpoint, "writerCheckpoint"); Ensure.NotNull(chaserCheckpoint, "chaserCheckpoint"); _chaserCheckpoint = chaserCheckpoint; + _tfTracker = tfTracker; _reader = new TFChunkReader(db, writerCheckpoint, _chaserCheckpoint.Read(), optimizeReadSideCache); } @@ -32,7 +35,7 @@ record = res.LogRecord; } public SeqReadResult TryReadNext() { - var res = _reader.TryReadNext(); + var res = _reader.TryReadNext(_tfTracker); if (res.Success) _chaserCheckpoint.Write(res.RecordPostPosition); else diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs index 8333c1f515f..9728b0c0054 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkDb.cs @@ -10,17 +10,17 @@ namespace EventStore.Core.TransactionLog.Chunks { public class TFChunkDb : IDisposable { public 
readonly TFChunkDbConfig Config; + private readonly ITransactionFileTracker _tracker; public readonly TFChunkManager Manager; private readonly ILogger _log; - private readonly ITransactionFileTracker _tracker; private int _closed; public TFChunkDb(TFChunkDbConfig config, ITransactionFileTracker tracker = null, ILogger log = null) { Ensure.NotNull(config, "config"); Config = config; - _tracker = tracker ?? new TFChunkTracker.NoOp(); + _tracker = tracker ?? ITransactionFileTracker.NoOp; Manager = new TFChunkManager(Config, _tracker); _log = log ?? Serilog.Log.ForContext(); } @@ -76,9 +76,9 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) unbufferedRead: Config.Unbuffered, initialReaderCount: Config.InitialReaderCount, maxReaderCount: Config.MaxReaderCount, - tracker: _tracker, optimizeReadSideCache: Config.OptimizeReadSideCache, - reduceFileCachePressure: Config.ReduceFileCachePressure); + reduceFileCachePressure: Config.ReduceFileCachePressure, + tracker: _tracker); else { chunk = TFChunk.TFChunk.FromOngoingFile(chunkInfo.ChunkFileName, Config.ChunkSize, checkSize: false, @@ -179,7 +179,7 @@ public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) for (int chunkNum = lastBgChunkNum; chunkNum >= 0;) { var chunk = Manager.GetChunk(chunkNum); try { - chunk.VerifyFileHash(); + chunk.VerifyFileHash(_tracker); } catch (FileBeingDeletedException exc) { _log.Debug( "{exceptionType} exception was thrown while doing background validation of chunk {chunk}.", diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs index 21a1018e587..4dc850993b1 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkManager.cs @@ -88,7 +88,7 @@ private void CacheUncacheReadOnlyChunks() { for (int chunkNum = lastChunkToCache; chunkNum < _chunksCount;) { var chunk = _chunks[chunkNum]; if (chunk.IsReadOnly) - chunk.CacheInMemory(); + chunk.CacheInMemory(_tracker); chunkNum = chunk.ChunkHeader.ChunkEndNumber + 1; } } @@ -122,7 +122,7 @@ public TFChunk.TFChunk AddNewChunk() { initialReaderCount: _config.InitialReaderCount, maxReaderCount: _config.MaxReaderCount, reduceFileCachePressure: _config.ReduceFileCachePressure, - tracker: _tracker); + _tracker); AddChunk(chunk); return chunk; } @@ -148,7 +148,7 @@ public TFChunk.TFChunk AddNewChunk(ChunkHeader chunkHeader, int fileSize) { initialReaderCount: _config.InitialReaderCount, maxReaderCount: _config.MaxReaderCount, reduceFileCachePressure: _config.ReduceFileCachePressure, - tracker: _tracker); + _tracker); AddChunk(chunk); return chunk; } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs index 062bbc0c707..c166aac7e77 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReader.cs @@ -42,11 +42,11 @@ public void Reposition(long position) { _curPos = position; } - public SeqReadResult TryReadNext() { - return TryReadNextInternal(0); + public SeqReadResult TryReadNext(ITransactionFileTracker tracker) { + return TryReadNextInternal(0, tracker); } - private SeqReadResult TryReadNextInternal(int retries) { + private SeqReadResult TryReadNextInternal(int retries, ITransactionFileTracker tracker) { while (true) { var pos = _curPos; var writerChk = _writerCheckpoint.Read(); @@ -56,7 +56,7 @@ private SeqReadResult 
TryReadNextInternal(int retries) { var chunk = _db.Manager.GetChunkFor(pos); RecordReadResult result; try { - result = chunk.TryReadClosestForward(chunk.ChunkHeader.GetLocalLogPosition(pos)); + result = chunk.TryReadClosestForward(chunk.ChunkHeader.GetLocalLogPosition(pos), tracker); CountRead(chunk.IsCached); } catch (FileBeingDeletedException) { if (retries > MaxRetries) @@ -64,7 +64,7 @@ private SeqReadResult TryReadNextInternal(int retries) { string.Format( "Got a file that was being deleted {0} times from TFChunkDb, likely a bug there.", MaxRetries)); - return TryReadNextInternal(retries + 1); + return TryReadNextInternal(retries + 1, tracker); } if (result.Success) { @@ -81,11 +81,11 @@ private SeqReadResult TryReadNextInternal(int retries) { } } - public SeqReadResult TryReadPrev() { - return TryReadPrevInternal(0); + public SeqReadResult TryReadPrev(ITransactionFileTracker tracker) { + return TryReadPrevInternal(0, tracker); } - private SeqReadResult TryReadPrevInternal(int retries) { + private SeqReadResult TryReadPrevInternal(int retries, ITransactionFileTracker tracker) { while (true) { var pos = _curPos; var writerChk = _writerCheckpoint.Read(); @@ -109,15 +109,15 @@ private SeqReadResult TryReadPrevInternal(int retries) { RecordReadResult result; try { result = readLast - ? chunk.TryReadLast() - : chunk.TryReadClosestBackward(chunk.ChunkHeader.GetLocalLogPosition(pos)); + ? chunk.TryReadLast(tracker) + : chunk.TryReadClosestBackward(chunk.ChunkHeader.GetLocalLogPosition(pos), tracker); CountRead(chunk.IsCached); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new Exception(string.Format( "Got a file that was being deleted {0} times from TFChunkDb, likely a bug there.", MaxRetries)); - return TryReadPrevInternal(retries + 1); + return TryReadPrevInternal(retries + 1, tracker); } if (result.Success) { @@ -137,11 +137,11 @@ private SeqReadResult TryReadPrevInternal(int retries) { } } - public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { - return TryReadAtInternal(position, couldBeScavenged, 0); + public RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker) { + return TryReadAtInternal(position, couldBeScavenged, 0, tracker); } - private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, int retries) { + private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, int retries, ITransactionFileTracker tracker) { var writerChk = _writerCheckpoint.Read(); if (position >= writerChk) { _log.Warning( @@ -153,20 +153,20 @@ private RecordReadResult TryReadAtInternal(long position, bool couldBeScavenged, var chunk = _db.Manager.GetChunkFor(position); try { CountRead(chunk.IsCached); - return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged); + return chunk.TryReadAt(chunk.ChunkHeader.GetLocalLogPosition(position), couldBeScavenged, tracker); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( "Been told the file was deleted > MaxRetries times. 
Probably a problem in db."); - return TryReadAtInternal(position, couldBeScavenged, retries + 1); + return TryReadAtInternal(position, couldBeScavenged, retries + 1, tracker); } } - public bool ExistsAt(long position) { - return ExistsAtInternal(position, 0); + public bool ExistsAt(long position, ITransactionFileTracker tracker) { + return ExistsAtInternal(position, 0, tracker); } - private bool ExistsAtInternal(long position, int retries) { + private bool ExistsAtInternal(long position, int retries, ITransactionFileTracker tracker) { var writerChk = _writerCheckpoint.Read(); if (position >= writerChk) return false; @@ -176,12 +176,12 @@ private bool ExistsAtInternal(long position, int retries) { CountRead(chunk.IsCached); if (_optimizeReadSideCache) _existsAtOptimizer.Optimize(chunk); - return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position)); + return chunk.ExistsAt(chunk.ChunkHeader.GetLocalLogPosition(position), tracker); } catch (FileBeingDeletedException) { if (retries > MaxRetries) throw new FileBeingDeletedException( "Been told the file was deleted > MaxRetries times. Probably a problem in db."); - return ExistsAtInternal(position, retries + 1); + return ExistsAtInternal(position, retries + 1, tracker); } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs index 52ac822570b..aad15036b53 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkReaderExistsAtOptimizer.cs @@ -34,7 +34,7 @@ public TFChunkReaderExistsAtOptimizer(int maxCached) { if (chunk == null) return false; Log.Debug("Optimizing chunk {chunk} for fast merge...", chunk.FileName); - chunk.OptimizeExistsAt(); + chunk.OptimizeExistsAt(ITransactionFileTracker.NoOp); // noop ok, deprecated path return true; }; diff --git a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs index 2430155e306..7ea3a5b1db2 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TFChunkScavenger.cs @@ -16,6 +16,7 @@ using EventStore.Core.TransactionLog.LogRecords; using EventStore.Core.TransactionLog.Scavenging; using EventStore.LogCommon; +using OpenTelemetry.Trace; using ILogger = Serilog.ILogger; namespace EventStore.Core.TransactionLog.Chunks { @@ -32,12 +33,15 @@ public class TFChunkScavenger : TFChunkScavenger { private readonly ITableIndex _tableIndex; private readonly IReadIndex _readIndex; private readonly IMetastreamLookup _metastreams; + private readonly ITransactionFileTracker _tfTracker; private readonly long _maxChunkDataSize; private readonly bool _unsafeIgnoreHardDeletes; private readonly int _threads; public TFChunkScavenger(ILogger logger, TFChunkDb db, ITFChunkScavengerLog scavengerLog, ITableIndex tableIndex, - IReadIndex readIndex, IMetastreamLookup metastreams, long? maxChunkDataSize = null, + IReadIndex readIndex, IMetastreamLookup metastreams, + ITransactionFileTracker tfTracker, + long? 
maxChunkDataSize = null, bool unsafeIgnoreHardDeletes = false, int threads = 1) { Ensure.NotNull(logger, nameof(logger)); Ensure.NotNull(db, "db"); @@ -61,6 +65,7 @@ public TFChunkScavenger(ILogger logger, TFChunkDb db, ITFChunkScavengerLog scave _tableIndex = tableIndex; _readIndex = readIndex; _metastreams = metastreams; + _tfTracker = tfTracker; _maxChunkDataSize = maxChunkDataSize ?? db.Config.ChunkSize; _unsafeIgnoreHardDeletes = unsafeIgnoreHardDeletes; _threads = threads; @@ -157,6 +162,7 @@ private void ScavengeInternal(bool alwaysKeepScavenged, bool mergeChunks, int st maxChunkDataSize: _maxChunkDataSize, scavengerLog: _scavengerLog, throttle: new Throttle(_logger, TimeSpan.Zero, TimeSpan.Zero, 100), + tracker: _tfTracker, ct: ct); } @@ -196,7 +202,7 @@ private void ScavengeChunk(bool alwaysKeepScavenged, TFChunk.TFChunk oldChunk, initialReaderCount: _db.Config.InitialReaderCount, maxReaderCount: _db.Config.MaxReaderCount, reduceFileCachePressure: _db.Config.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + _tfTracker); } catch (IOException exc) { _logger.Error(exc, "IOException during creating new chunk for scavenging purposes. Stopping scavenging process..."); @@ -204,7 +210,7 @@ private void ScavengeChunk(bool alwaysKeepScavenged, TFChunk.TFChunk oldChunk, } try { - TraverseChunkBasic(oldChunk, ct, + TraverseChunkBasic(oldChunk, ct, _tfTracker, result => { threadLocalCache.Records.Add(result); @@ -337,6 +343,7 @@ public static void MergePhase( long maxChunkDataSize, ITFChunkScavengerLog scavengerLog, Throttle throttle, + ITransactionFileTracker tracker, CancellationToken ct) { bool mergedSomething; @@ -362,6 +369,7 @@ public static void MergePhase( db: db, scavengerLog: scavengerLog, oldChunks: chunksToMerge, + tracker: tracker, ct: ct)) { mergedSomething = true; @@ -382,6 +390,7 @@ public static void MergePhase( db: db, scavengerLog: scavengerLog, oldChunks: chunksToMerge, + tracker: tracker, ct: ct)) { mergedSomething = true; @@ -398,6 +407,7 @@ private static bool MergeChunks( TFChunkDb db, ITFChunkScavengerLog scavengerLog, IList oldChunks, + ITransactionFileTracker tracker, CancellationToken ct) { if (oldChunks.IsEmpty()) throw new ArgumentException("Provided list of chunks to merge is empty."); @@ -432,7 +442,7 @@ private static bool MergeChunks( initialReaderCount: db.Config.InitialReaderCount, maxReaderCount: db.Config.MaxReaderCount, reduceFileCachePressure: db.Config.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + tracker); } catch (IOException exc) { logger.Error(exc, "IOException during creating new chunk for scavenging merge purposes. Stopping scavenging merge process..."); @@ -445,7 +455,7 @@ private static bool MergeChunks( var positionMapping = new List(); foreach (var oldChunk in oldChunks) { var lastFlushedPage = -1; - TraverseChunkBasic(oldChunk, ct, + TraverseChunkBasic(oldChunk, ct, tracker, result => { positionMapping.Add(WriteRecord(newChunk, result.LogRecord)); @@ -614,7 +624,7 @@ private bool ShouldKeepPrepare( return true; } - var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId); + var lastEventNumber = _readIndex.GetStreamLastEventNumber(prepare.EventStreamId, _tfTracker); if (lastEventNumber == EventNumber.DeletedStream) { // The stream is hard deleted but this is not the tombstone. 
// When all prepares and commit of transaction belong to single chunk and the stream is deleted, @@ -672,7 +682,7 @@ private bool ShouldKeepPrepare( return true; } - var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId); + var meta = _readIndex.GetStreamMetadata(prepare.EventStreamId, _tfTracker); bool canRemove = (meta.MaxCount.HasValue && eventNumber < lastEventNumber - meta.MaxCount.Value + 1) || (meta.TruncateBefore.HasValue && eventNumber < meta.TruncateBefore.Value) || (meta.MaxAge.HasValue && prepare.TimeStamp < DateTime.UtcNow - meta.MaxAge.Value); @@ -687,7 +697,7 @@ private bool ShouldKeepPrepare( } private bool DiscardBecauseDuplicate(IPrepareLogRecord prepare, long eventNumber) { - var result = _readIndex.ReadEvent(IndexReader.UnspecifiedStreamName, prepare.EventStreamId, eventNumber); + var result = _readIndex.ReadEvent(IndexReader.UnspecifiedStreamName, prepare.EventStreamId, eventNumber, _tfTracker); if (result.Result == ReadEventResult.Success && result.Record.LogPosition != prepare.LogPosition) { // prepare isn't the record we get for an index read at its own stream/version. // therefore it is a duplicate that cannot be read from the index, discard it. @@ -702,13 +712,13 @@ private bool IsSoftDeletedTempStreamWithinSameChunk(TStreamId eventStreamId, lon TStreamId msh; if (_metastreams.IsMetaStream(eventStreamId)) { var originalStreamId = _metastreams.OriginalStreamOf(eventStreamId); - var meta = _readIndex.GetStreamMetadata(originalStreamId); + var meta = _readIndex.GetStreamMetadata(originalStreamId, _tfTracker); if (meta.TruncateBefore != EventNumber.DeletedStream || meta.TempStream != true) return false; sh = originalStreamId; msh = eventStreamId; } else { - var meta = _readIndex.GetStreamMetadata(eventStreamId); + var meta = _readIndex.GetStreamMetadata(eventStreamId, _tfTracker); if (meta.TruncateBefore != EventNumber.DeletedStream || meta.TempStream != true) return false; sh = eventStreamId; @@ -728,14 +738,15 @@ private bool IsSoftDeletedTempStreamWithinSameChunk(TStreamId eventStreamId, lon } private static void TraverseChunkBasic(TFChunk.TFChunk chunk, CancellationToken ct, + ITransactionFileTracker tracker, Action process) { - var result = chunk.TryReadFirst(); + var result = chunk.TryReadFirst(tracker); while (result.Success) { process(new CandidateRecord(result.LogRecord, result.RecordLength)); ct.ThrowIfCancellationRequested(); - result = chunk.TryReadClosestForward(result.NextPosition); + result = chunk.TryReadClosestForward(result.NextPosition, tracker); } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs index 964fde3e0fa..3df289cb1fb 100644 --- a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTracker.cs @@ -1,31 +1,53 @@ #nullable enable + +using System.Collections.Generic; using EventStore.Core.Metrics; using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.TransactionLog.Chunks; public class TFChunkTracker : ITransactionFileTracker { - private readonly CounterSubMetric _readBytes; - private readonly CounterSubMetric _readEvents; + private readonly (CounterSubMetric Events, CounterSubMetric Bytes)[] _subMetrics; + + public TFChunkTracker(CounterMetric eventMetric, CounterMetric byteMetric, string user) { + _subMetrics = new (CounterSubMetric, CounterSubMetric)[(int)(ITransactionFileTracker.Source.EnumLength)]; - public TFChunkTracker( - 
CounterSubMetric readBytes, - CounterSubMetric readEvents) { + var unknownEvents = CreateSubMetric(eventMetric, "unknown", user); + var unknownBytes = CreateSubMetric(byteMetric, "unknown", user); - _readBytes = readBytes; - _readEvents = readEvents; + for (var i = 0; i < _subMetrics.Length; i++) { + var sourceName = NameOf((ITransactionFileTracker.Source)i); + var isUnknown = string.IsNullOrWhiteSpace(sourceName); + _subMetrics[i] = ( + Events: isUnknown ? unknownEvents : CreateSubMetric(eventMetric, sourceName, user), + Bytes: isUnknown ? unknownBytes : CreateSubMetric(byteMetric, sourceName, user)); + } } - public void OnRead(ILogRecord record) { + static string NameOf(ITransactionFileTracker.Source source) => source switch { + ITransactionFileTracker.Source.ChunkCache => "chunk-cache", + ITransactionFileTracker.Source.File => "file", + _ => "", + }; + + static CounterSubMetric CreateSubMetric(CounterMetric metric, string source, string user) { + var readTag = new KeyValuePair("activity", "read"); + var sourceTag = new KeyValuePair("source", source); + var userTag = new KeyValuePair("user", user); + return new CounterSubMetric(metric, [readTag, sourceTag, userTag]); + } + + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { if (record is not PrepareLogRecord prepare) return; - _readBytes.Add(prepare.Data.Length + prepare.Metadata.Length); - _readEvents.Add(1); + var subMetrics = _subMetrics[(int)source]; + subMetrics.Bytes.Add(prepare.Data.Length + prepare.Metadata.Length); // approximate + subMetrics.Events.Add(1); } - public class NoOp : ITransactionFileTracker { - public void OnRead(ILogRecord record) { - } + public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { + var subMetrics = _subMetrics[(int)source]; + subMetrics.Bytes.Add(bytesRead); } } diff --git a/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs new file mode 100644 index 00000000000..69a91cf622b --- /dev/null +++ b/src/EventStore.Core/TransactionLog/Chunks/TransactionFileTrackerFactory.cs @@ -0,0 +1,30 @@ +#nullable enable + +using System.Collections.Concurrent; +using EventStore.Core.Metrics; + +namespace EventStore.Core.TransactionLog.Chunks; + +public class TransactionFileTrackerFactory : ITransactionFileTrackerFactory { + private readonly ConcurrentDictionary _trackersByUser = new(); + private readonly CounterMetric _eventMetric; + private readonly CounterMetric _byteMetric; + + public TransactionFileTrackerFactory(CounterMetric eventMetric, CounterMetric byteMetric) { + _eventMetric = eventMetric; + _byteMetric = byteMetric; + } + + public ITransactionFileTracker GetOrAdd(string user) { + return _trackersByUser.GetOrAdd(user, Create, (_eventMetric, _byteMetric)); + } + + private static ITransactionFileTracker Create(string user, (CounterMetric EventMetric, CounterMetric ByteMetric) metrics) { + var tracker = new TFChunkTracker(metrics.EventMetric, metrics.ByteMetric, user); + return tracker; + } + + public void Clear() { + _trackersByUser.Clear(); + } +} diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs index f431f9c39a8..879cf4b8c52 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileReader.cs @@ -5,30 +5,34 @@ namespace EventStore.Core.TransactionLog { public interface ITransactionFileReader { void 
Reposition(long position); - SeqReadResult TryReadNext(); - SeqReadResult TryReadPrev(); + SeqReadResult TryReadNext(ITransactionFileTracker tracker); + SeqReadResult TryReadPrev(ITransactionFileTracker tracker); - RecordReadResult TryReadAt(long position, bool couldBeScavenged); - bool ExistsAt(long position); + RecordReadResult TryReadAt(long position, bool couldBeScavenged, ITransactionFileTracker tracker); + bool ExistsAt(long position, ITransactionFileTracker tracker); } - public struct TFReaderLease : IDisposable { + public readonly struct TFReaderLease : IDisposable { public readonly ITransactionFileReader Reader; + private readonly ITransactionFileTracker _tracker; private readonly ObjectPool _pool; - public TFReaderLease(ObjectPool pool) { + public TFReaderLease(ObjectPool pool, ITransactionFileTracker tracker) { _pool = pool; + _tracker = tracker; Reader = pool.Get(); } - public TFReaderLease(ITransactionFileReader reader) { + public TFReaderLease(ITransactionFileReader reader, ITransactionFileTracker tracker) { _pool = null; + _tracker = tracker; Reader = reader; } void IDisposable.Dispose() { - if (_pool != null) + if (_pool != null) { _pool.Return(Reader); + } } public void Reposition(long position) { @@ -36,19 +40,19 @@ public void Reposition(long position) { } public SeqReadResult TryReadNext() { - return Reader.TryReadNext(); + return Reader.TryReadNext(_tracker); } public SeqReadResult TryReadPrev() { - return Reader.TryReadPrev(); + return Reader.TryReadPrev(_tracker); } public bool ExistsAt(long position) { - return Reader.ExistsAt(position); + return Reader.ExistsAt(position, _tracker); } public RecordReadResult TryReadAt(long position, bool couldBeScavenged) { - return Reader.TryReadAt(position, couldBeScavenged); + return Reader.TryReadAt(position, couldBeScavenged, _tracker); } } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs index 861f712fb21..fa2cf00ef35 100644 --- a/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTracker.cs @@ -1,7 +1,25 @@ +#nullable enable + using EventStore.Core.TransactionLog.LogRecords; namespace EventStore.Core.TransactionLog; public interface ITransactionFileTracker { - void OnRead(ILogRecord record); + void OnRead(ILogRecord record, Source source); + void OnRead(int bytesRead, Source source); + + enum Source { + Unknown, + Archive, + ChunkCache, + File, + EnumLength, + }; + + static readonly ITransactionFileTracker NoOp = new NoOp(); +} + +file class NoOp : ITransactionFileTracker { + public void OnRead(ILogRecord record, ITransactionFileTracker.Source source) { } + public void OnRead(int bytesRead, ITransactionFileTracker.Source source) { } } diff --git a/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs b/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs new file mode 100644 index 00000000000..ba4e8781ee0 --- /dev/null +++ b/src/EventStore.Core/TransactionLog/ITransactionFileTrackerFactory.cs @@ -0,0 +1,12 @@ +#nullable enable + +namespace EventStore.Core.TransactionLog; + +public interface ITransactionFileTrackerFactory { + ITransactionFileTracker GetOrAdd(string name); + static readonly ITransactionFileTrackerFactory NoOp = new NoOp(); +} + +file class NoOp : ITransactionFileTrackerFactory { + public ITransactionFileTracker GetOrAdd(string name) => ITransactionFileTracker.NoOp; +} diff --git 
a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs index 6d4120f3110..cea3a52665a 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkManagerForExecutor.cs @@ -9,22 +9,25 @@ public class ChunkManagerForExecutor : IChunkManagerForChunkExecutor< private readonly ILogger _logger; private readonly TFChunkManager _manager; private readonly TFChunkDbConfig _dbConfig; + private readonly ITransactionFileTracker _tracker; - public ChunkManagerForExecutor(ILogger logger, TFChunkManager manager, TFChunkDbConfig dbConfig) { + public ChunkManagerForExecutor(ILogger logger, TFChunkManager manager, TFChunkDbConfig dbConfig, + ITransactionFileTracker tracker) { _logger = logger; _manager = manager; _dbConfig = dbConfig; + _tracker = tracker; } public IChunkWriterForExecutor CreateChunkWriter( IChunkReaderForExecutor sourceChunk) { - return new ChunkWriterForExecutor(_logger, this, _dbConfig, sourceChunk); + return new ChunkWriterForExecutor(_logger, this, _dbConfig, sourceChunk, _tracker); } public IChunkReaderForExecutor GetChunkReaderFor(long position) { var tfChunk = _manager.GetChunkFor(position); - return new ChunkReaderForExecutor(tfChunk); + return new ChunkReaderForExecutor(tfChunk, _tracker); } public void SwitchChunk( diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs index 7bb463040e7..85d1b8a10d8 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForAccumulator.cs @@ -14,6 +14,7 @@ public class ChunkReaderForAccumulator : IChunkReaderForAccumulator _metaStreamLookup; private readonly IStreamIdConverter _streamIdConverter; private readonly ICheckpoint _replicationChk; + private readonly ITransactionFileTracker _tracker; private readonly int _chunkSize; private readonly Func _getBuffer; @@ -24,12 +25,14 @@ public ChunkReaderForAccumulator( IMetastreamLookup metastreamLookup, IStreamIdConverter streamIdConverter, ICheckpoint replicationChk, + ITransactionFileTracker tracker, int chunkSize) { _manager = manager; _metaStreamLookup = metastreamLookup; _streamIdConverter = streamIdConverter; _replicationChk = replicationChk; + _tracker = tracker; _chunkSize = chunkSize; var reusableRecordBuffer = new ReusableBuffer(8192); @@ -62,7 +65,7 @@ public IEnumerable ReadChunkInto( var localPos = chunk.ChunkHeader.GetLocalLogPosition(nextPos); - var result = chunk.TryReadClosestForwardRaw(localPos, _getBuffer); + var result = chunk.TryReadClosestForwardRaw(localPos, _getBuffer, _tracker); if (!result.Success) { // there is no need to release the reusable buffer here since result.Success is false diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs index a66c382518f..88ab21045bb 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkReaderForExecutor.cs @@ -6,9 +6,12 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class ChunkReaderForExecutor : IChunkReaderForExecutor { private readonly TFChunk _chunk; 
+ private readonly ITransactionFileTracker _tracker; - public ChunkReaderForExecutor(TFChunk chunk) { + public ChunkReaderForExecutor(TFChunk chunk, + ITransactionFileTracker tracker) { _chunk = chunk; + _tracker = tracker; } public string Name => _chunk.ToString(); @@ -30,7 +33,7 @@ public IEnumerable ReadInto( RecordForExecutor.NonPrepare nonPrepare, RecordForExecutor.Prepare prepare) { - var result = _chunk.TryReadFirst(); + var result = _chunk.TryReadFirst(_tracker); while (result.Success) { var record = result.LogRecord; if (record.RecordType != LogRecordType.Prepare) { @@ -51,7 +54,7 @@ public IEnumerable ReadInto( yield return true; } - result = _chunk.TryReadClosestForward(result.NextPosition); + result = _chunk.TryReadClosestForward(result.NextPosition, _tracker); } } } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs index 222d6279149..665969576c6 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/ChunkWriterForExecutor.cs @@ -20,7 +20,8 @@ public ChunkWriterForExecutor( ILogger logger, ChunkManagerForExecutor manager, TFChunkDbConfig dbConfig, - IChunkReaderForExecutor sourceChunk) { + IChunkReaderForExecutor sourceChunk, + ITransactionFileTracker tracker) { _logger = logger; _manager = manager; @@ -45,7 +46,7 @@ public ChunkWriterForExecutor( initialReaderCount: dbConfig.InitialReaderCount, maxReaderCount: dbConfig.MaxReaderCount, reduceFileCachePressure: dbConfig.ReduceFileCachePressure, - tracker: new TFChunkTracker.NoOp()); + tracker); } public string FileName { get; } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs index 994fad8f075..d1fedb42ef2 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForAccumulator.cs @@ -4,9 +4,11 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class IndexReaderForAccumulator : IIndexReaderForAccumulator { private readonly IReadIndex _readIndex; + private readonly ITransactionFileTracker _tracker; - public IndexReaderForAccumulator(IReadIndex readIndex) { + public IndexReaderForAccumulator(IReadIndex readIndex, ITransactionFileTracker tracker) { _readIndex = readIndex; + _tracker = tracker; } // reads a stream forward but only returns event info not the full event. 
@@ -30,7 +32,8 @@ public IndexReadEventInfoResult ReadEventInfoForward( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, + _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } @@ -52,14 +55,16 @@ public IndexReadEventInfoResult ReadEventInfoBackward( _ => streamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, + _tracker); case StreamHandle.Kind.Id: // uses log to check for hash collisions return _readIndex.ReadEventInfoBackward_KnownCollisions( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, + _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs index 592e09e1f4d..655938ddbbe 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/IndexReaderForCalculator.cs @@ -7,15 +7,18 @@ public class IndexReaderForCalculator : IIndexReaderForCalculator _readIndex; private readonly Func _tfReaderFactory; private readonly Func _lookupUniqueHashUser; + private readonly ITransactionFileTracker _tracker; public IndexReaderForCalculator( IReadIndex readIndex, Func tfReaderFactory, - Func lookupUniqueHashUser) { + Func lookupUniqueHashUser, + ITransactionFileTracker tracker) { _readIndex = readIndex; _tfReaderFactory = tfReaderFactory; _lookupUniqueHashUser = lookupUniqueHashUser; + _tracker = tracker; } public long GetLastEventNumber( @@ -28,12 +31,12 @@ public long GetLastEventNumber( return _readIndex.GetStreamLastEventNumber_NoCollisions( handle.StreamHash, _lookupUniqueHashUser, - scavengePoint.Position); + scavengePoint.Position, _tracker); case StreamHandle.Kind.Id: // uses the index and the log to fetch the last event number return _readIndex.GetStreamLastEventNumber_KnownCollisions( handle.StreamId, - scavengePoint.Position); + scavengePoint.Position, _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } @@ -59,7 +62,7 @@ public IndexReadEventInfoResult ReadEventInfoForward( handle.StreamId, fromEventNumber, maxCount, - scavengePoint.Position); + scavengePoint.Position, _tracker); default: throw new ArgumentOutOfRangeException(nameof(handle), handle, null); } diff --git a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs index 28a3dbad62d..b7f5e15eba8 100644 --- a/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs +++ b/src/EventStore.Core/TransactionLog/Scavenging/DbAccess/OldScavengeChunkMergerBackend.cs @@ -6,10 +6,12 @@ namespace EventStore.Core.TransactionLog.Scavenging { public class OldScavengeChunkMergerBackend : IChunkMergerBackend { private readonly ILogger _logger; private readonly TFChunkDb _db; + private readonly ITransactionFileTracker _tracker; - public OldScavengeChunkMergerBackend(ILogger logger, TFChunkDb db) { + public OldScavengeChunkMergerBackend(ILogger logger, TFChunkDb db, ITransactionFileTracker tracker) { _logger = logger; _db = db; + _tracker = tracker; } public void MergeChunks( @@ -29,6 +31,7 @@ public void MergeChunks( maxChunkDataSize: _db.Config.ChunkSize, scavengerLog: scavengerLogger, 
throttle: throttle, + tracker: _tracker, ct: cancellationToken); } }
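
Taken together, the changes above widen the read APIs so an ITransactionFileTracker travels from the caller down to the individual chunk reads, letting bytes and events be attributed to a user and a source (chunk cache vs. file). Below is a minimal sketch of the intended call pattern; the helper method, parameter names, user string, and usings are illustrative assumptions rather than code taken from this change.

using EventStore.Core.TransactionLog;

// Sketch only (assumed wiring): shows how a per-user tracker obtained from the
// factory is either passed explicitly on each read or captured by a TFReaderLease.
static class TrackerUsageSketch {
	public static RecordReadResult ReadWithTracking(
		ITransactionFileReader reader,           // any reader implementing the widened interface
		ITransactionFileTrackerFactory trackers, // e.g. TransactionFileTrackerFactory, or its NoOp
		string user,                             // hypothetical caller identity used only for attribution
		long position) {

		// One tracker (and therefore one set of metric series) per user.
		ITransactionFileTracker tracker = trackers.GetOrAdd(user);

		// Either pass the tracker explicitly, as the chaser and scavenger now do...
		if (!reader.ExistsAt(position, tracker))
			return RecordReadResult.Failure;

		// ...or capture it in a lease whose parameterless methods forward it on every call.
		using var lease = new TFReaderLease(reader, tracker);
		return lease.TryReadAt(position, couldBeScavenged: false);
	}
}

Deprecated or untracked paths keep their old behaviour by passing ITransactionFileTracker.NoOp, as TFChunkReaderExistsAtOptimizer does above, so callers that have no user context do not need to construct metrics.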