diff --git a/src/AsyncGenerator.yml b/src/AsyncGenerator.yml index 3c541dc7315..47966a224ff 100644 --- a/src/AsyncGenerator.yml +++ b/src/AsyncGenerator.yml @@ -107,7 +107,7 @@ - conversion: ToAsync rule: EventListener - conversion: ToAsync - rule: ICache + rule: Cache typeConversion: - conversion: Ignore name: EnumerableImpl @@ -134,7 +134,7 @@ - parameter: Required requiresCancellationToken: - rule: EventListener - - rule: ICache + - rule: Cache scanMethodBody: true scanForMissingAsyncMembers: - all: true @@ -267,7 +267,9 @@ methodRules: name: Lock - containingType: NHibernate.Cache.ICache name: Unlock - name: ICache + - containingType: NHibernate.Cache.IBatchableReadOnlyCache + - containingType: NHibernate.Cache.IBatchableCache + name: Cache - filters: - containingNamespace: NHibernate - containingType: NHibernate.Tool.hbm2ddl.SchemaUpdate diff --git a/src/NHibernate.Test/Async/CacheTest/BatchableCacheFixture.cs b/src/NHibernate.Test/Async/CacheTest/BatchableCacheFixture.cs new file mode 100644 index 00000000000..b8f96393a28 --- /dev/null +++ b/src/NHibernate.Test/Async/CacheTest/BatchableCacheFixture.cs @@ -0,0 +1,702 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Text; +using System.Threading.Tasks; +using NHibernate.Cache; +using NHibernate.Cfg; +using NHibernate.DomainModel; +using NHibernate.Test.CacheTest.Caches; +using NUnit.Framework; +using Environment = NHibernate.Cfg.Environment; +using NHibernate.Linq; + +namespace NHibernate.Test.CacheTest +{ + using System.Threading; + [TestFixture] + public class BatchableCacheFixtureAsync : TestCase + { + protected override IList Mappings => new[] + { + "CacheTest.ReadOnly.hbm.xml", + "CacheTest.ReadWrite.hbm.xml" + }; + + protected override string MappingsAssembly => "NHibernate.Test"; + + protected override string CacheConcurrencyStrategy => null; + + protected override void Configure(Configuration configuration) + { + configuration.SetProperty(Environment.UseSecondLevelCache, "true"); + configuration.SetProperty(Environment.UseQueryCache, "true"); + configuration.SetProperty(Environment.CacheProvider, typeof(BatchableCacheProvider).AssemblyQualifiedName); + } + + protected override bool CheckDatabaseWasCleaned() + { + base.CheckDatabaseWasCleaned(); + return true; // We are unable to delete read-only items. 
+ } + + protected override void OnSetUp() + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var totalItems = 6; + for (var i = 1; i <= totalItems; i++) + { + var parent = new ReadOnly + { + Name = $"Name{i}" + }; + for (var j = 1; j <= totalItems; j++) + { + var child = new ReadOnlyItem + { + Parent = parent + }; + parent.Items.Add(child); + } + s.Save(parent); + } + for (var i = 1; i <= totalItems; i++) + { + var parent = new ReadWrite + { + Name = $"Name{i}" + }; + for (var j = 1; j <= totalItems; j++) + { + var child = new ReadWriteItem + { + Parent = parent + }; + parent.Items.Add(child); + } + s.Save(parent); + } + tx.Commit(); + } + } + + protected override void OnTearDown() + { + using (var s = OpenSession()) + using (var tx = s.BeginTransaction()) + { + s.Delete("from ReadWrite"); + tx.Commit(); + } + } + + [Test] + public async Task MultipleGetReadOnlyCollectionTestAsync() + { + var persister = Sfi.GetCollectionPersister($"{typeof(ReadOnly).FullName}.Items"); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = await (s.Query().ToListAsync()); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + await (tx.CommitAsync()); + } + + // Batch size 5 + var testCases = new List>> + { + // When the cache is empty, GetMultiple will be called two times. One time in type + // DefaultInitializeCollectionEventListener and the other time in BatchingCollectionInitializer. 
+ new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3, 4}, // triggered by InitializeCollectionFromCache method of DefaultInitializeCollectionEventListener type + new[] {1, 2, 3, 4, 5}, // triggered by Initialize method of BatchingCollectionInitializer type + }, + new[] {0, 1, 2, 3, 4}, + null + ), + // When there are not enough uninitialized collections after the demanded one to fill the batch, + // the nearest before the demanded collection are added. + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2, 1}, + new[] {5, 3, 2, 1, 0}, + }, + new[] {1, 2, 3, 4, 5}, + null + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2, 1}, + new[] {4, 3, 2, 1, 0}, + }, + new[] {1, 2, 3, 4, 5}, + null + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3, 4} // 0 get assembled and no further processing is done + }, + null, + (i) => i % 2 == 0 // Cache all even indexes before loading + ), + new Tuple>( + 1, + new int[][] + { + new[] {1, 2, 3, 4, 5}, // 2 and 4 get assembled inside InitializeCollectionFromCache + new[] {3, 5, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2, 1}, // 4 and 2 get assembled inside InitializeCollectionFromCache + new[] {3, 1, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3, 4}, // 1 and 3 get assembled inside InitializeCollectionFromCache + new[] {2, 4, 5} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ), + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2, 1}, // 5, 3 and 1 get assembled inside InitializeCollectionFromCache + new[] {2, 0} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ) + }; + + foreach (var tuple in testCases) + { + await (AssertMultipleCacheCollectionCallsAsync(ids, tuple.Item1, tuple.Item2, tuple.Item3, tuple.Item4)); + } + } + + [Test] + public async Task MultipleGetReadOnlyTestAsync() + { + var persister = Sfi.GetEntityPersister(typeof(ReadOnly).FullName); + 
Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = await (s.Query().ToListAsync()); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + await (tx.CommitAsync()); + } + // Batch size 3 + var parentTestCases = new List>> + { + // When the cache is empty, GetMultiple will be called two times. One time in type + // DefaultLoadEventListener and the other time in BatchingEntityLoader. + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2}, // triggered by LoadFromSecondLevelCache method of DefaultLoadEventListener type + new[] {1, 2, 3}, // triggered by Load method of BatchingEntityLoader type + }, + new[] {0, 1, 2}, + null + ), + // When there are not enough uninitialized entities after the demanded one to fill the batch, + // the nearest before the demanded entity are added. 
+ new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3}, + new[] {5, 3, 2}, + }, + new[] {3, 4, 5}, + null + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3}, + new[] {4, 3, 2}, + }, + new[] {3, 4, 5}, + null + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2} // 0 get assembled and no further processing is done + }, + null, + (i) => i % 2 == 0 // Cache all even indexes before loading + ), + new Tuple>( + 1, + new int[][] + { + new[] {1, 2, 3}, // 2 gets assembled inside LoadFromSecondLevelCache + new[] {3, 4, 5} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3}, // 4 gets assembled inside LoadFromSecondLevelCache + new[] {3, 2, 1} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2}, // 1 gets assembled inside LoadFromSecondLevelCache + new[] {2, 3, 4} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ), + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3}, // 5 and 3 get assembled inside LoadFromSecondLevelCache + new[] {2, 1, 0} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ) + }; + + foreach (var tuple in parentTestCases) + { + await (AssertMultipleCacheCallsAsync(ids, tuple.Item1, tuple.Item2, tuple.Item3, tuple.Item4)); + } + } + + [Test] + public async Task MultipleGetReadOnlyItemTestAsync() + { + var persister = Sfi.GetEntityPersister(typeof(ReadOnlyItem).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = await (s.Query().Take(6).ToListAsync()); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + await (tx.CommitAsync()); + } + // Batch size 4 + var parentTestCases = new List>> + { + // When the cache is empty, GetMultiple will be called two times. 
One time in type + // DefaultLoadEventListener and the other time in BatchingEntityLoader. + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3}, // triggered by LoadFromSecondLevelCache method of DefaultLoadEventListener type + new[] {1, 2, 3, 4}, // triggered by Load method of BatchingEntityLoader type + }, + new[] {0, 1, 2, 3}, + null + ), + // When there are not enough uninitialized entities after the demanded one to fill the batch, + // the nearest before the demanded entity are added. + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2}, + new[] {5, 3, 2, 1}, + }, + new[] {2, 3, 4, 5}, + null + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2}, + new[] {4, 3, 2, 1}, + }, + new[] {2, 3, 4, 5}, + null + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3} // 0 get assembled and no further processing is done + }, + null, + (i) => i % 2 == 0 // Cache all even indexes before loading + ), + new Tuple>( + 1, + new int[][] + { + new[] {1, 2, 3, 4}, // 2 and 4 get assembled inside LoadFromSecondLevelCache + new[] {3, 5, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2}, // 4 and 2 get assembled inside LoadFromSecondLevelCache + new[] {3, 1, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3}, // 1 and 3 get assembled inside LoadFromSecondLevelCache + new[] {2, 4, 5} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ), + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2}, // 5 and 3 get assembled inside LoadFromSecondLevelCache + new[] {2, 1, 0} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ) + }; + + foreach (var tuple in parentTestCases) + { + await (AssertMultipleCacheCallsAsync(ids, tuple.Item1, tuple.Item2, tuple.Item3, tuple.Item4)); + } + } + + [Test] + public async Task MultiplePutReadWriteTestAsync() + { + var persister = Sfi.GetEntityPersister(typeof(ReadWrite).FullName); + Assert.That(persister.Cache.Cache, 
Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + await (cache.ClearAsync(CancellationToken.None)); + cache.ClearStatistics(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = await (s.Query().ToListAsync()); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + await (tx.CommitAsync()); + } + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(2)); + + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2}, + new[] {3, 4, 5} + }, + cache.PutMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2}, + new[] {3, 4, 5} + }, + cache.LockMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2}, + new[] {3, 4, 5} + }, + cache.UnlockMultipleCalls + ); + } + + [Test] + public async Task MultiplePutReadWriteItemTestAsync() + { + var persister = Sfi.GetCollectionPersister($"{typeof(ReadWrite).FullName}.Items"); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + await (cache.ClearAsync(CancellationToken.None)); + cache.ClearStatistics(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = await (s.Query().ToListAsync()); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + + // Initialize the first item collection + await (NHibernateUtil.InitializeAsync(items.First(o => o.Id == ids[0]).Items)); + await (tx.CommitAsync()); + } + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + // Called in: DefaultInitializeCollectionEventListener, BatchingCollectionInitializer and ReadWriteCache + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(3)); + + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2, 3, 4} + }, + 
cache.PutMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2, 3, 4} + }, + cache.LockMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2, 3, 4} + }, + cache.UnlockMultipleCalls + ); + } + + [Test] + public async Task UpdateTimestampsCacheTestAsync() + { + var timestamp = Sfi.UpdateTimestampsCache; + var field = typeof(UpdateTimestampsCache).GetField( + "_batchUpdateTimestamps", + BindingFlags.NonPublic | BindingFlags.Instance); + Assert.That(field, Is.Not.Null); + var cache = (BatchableCache) field.GetValue(timestamp); + Assert.That(cache, Is.Not.Null); + + using (var s = OpenSession()) + { + const string query = "from ReadOnly e where e.Name = :name"; + const string name = "Name1"; + await (s + .CreateQuery(query) + .SetString("name", name) + .SetCacheable(true) + .UniqueResultAsync()); + + // Run a second time, just to test the query cache + var result = await (s + .CreateQuery(query) + .SetString("name", name) + .SetCacheable(true) + .UniqueResultAsync()); + + Assert.That(result, Is.Not.Null); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(1)); + Assert.That(cache.GetCalls, Has.Count.EqualTo(0)); + } + } + + private async Task AssertMultipleCacheCallsAsync(List ids, int idIndex, int[][] fetchedIdIndexes, int[] putIdIndexes, Func cacheBeforeLoadFn = null, CancellationToken cancellationToken = default(CancellationToken)) + where TEntity : CacheEntity + { + var persister = Sfi.GetEntityPersister(typeof(TEntity).FullName); + var cache = (BatchableCache) persister.Cache.Cache; + await (cache.ClearAsync(cancellationToken)); + + if (cacheBeforeLoadFn != null) + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + foreach (var id in ids.Where((o, i) => cacheBeforeLoadFn(i))) + { + await (s.GetAsync(id, cancellationToken)); + } + await (tx.CommitAsync(cancellationToken)); + } + } + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + 
cache.ClearStatistics(); + + foreach (var id in ids) + { + await (s.LoadAsync(id, cancellationToken)); + } + var item = await (s.GetAsync(ids[idIndex], cancellationToken)); + Assert.That(item, Is.Not.Null); + Assert.That(cache.GetCalls, Has.Count.EqualTo(0)); + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(fetchedIdIndexes.GetLength(0))); + if (putIdIndexes == null) + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(0)); + } + else + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(1)); + Assert.That( + cache.PutMultipleCalls[0].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(putIdIndexes.Select(o => ids[o]))); + } + + for (int i = 0; i < fetchedIdIndexes.GetLength(0); i++) + { + Assert.That( + cache.GetMultipleCalls[i].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(fetchedIdIndexes[i].Select(o => ids[o]))); + } + + await (tx.CommitAsync(cancellationToken)); + } + } + + private void AssertEquivalent(List ids, int[][] expectedIdIndexes, List result) + { + Assert.That(result, Has.Count.EqualTo(expectedIdIndexes.GetLength(0))); + for (int i = 0; i < expectedIdIndexes.GetLength(0); i++) + { + Assert.That( + result[i].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(expectedIdIndexes[i].Select(o => ids[o]))); + } + } + + private async Task AssertMultipleCacheCollectionCallsAsync(List ids, int idIndex, int[][] fetchedIdIndexes, int[] putIdIndexes, Func cacheBeforeLoadFn = null, CancellationToken cancellationToken = default(CancellationToken)) + { + var persister = Sfi.GetCollectionPersister($"{typeof(ReadOnly).FullName}.Items"); + var cache = (BatchableCache) persister.Cache.Cache; + await (cache.ClearAsync(cancellationToken)); + + if (cacheBeforeLoadFn != null) + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + foreach (var id in ids.Where((o, i) => cacheBeforeLoadFn(i))) + { + var item = await (s.GetAsync(id, cancellationToken)); + await 
(NHibernateUtil.InitializeAsync(item.Items, cancellationToken)); + } + await (tx.CommitAsync(cancellationToken)); + } + } + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + cache.ClearStatistics(); + + foreach (var id in ids) + { + await (s.GetAsync(id, cancellationToken)); + } + var item = await (s.GetAsync(ids[idIndex], cancellationToken)); + Assert.That(item, Is.Not.Null); + await (NHibernateUtil.InitializeAsync(item.Items, cancellationToken)); + Assert.That(cache.GetCalls, Has.Count.EqualTo(0)); + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(fetchedIdIndexes.GetLength(0))); + if (putIdIndexes == null) + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(0)); + } + else + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(1)); + Assert.That( + cache.PutMultipleCalls[0].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(putIdIndexes.Select(o => ids[o]))); + } + + for (int i = 0; i < fetchedIdIndexes.GetLength(0); i++) + { + Assert.That( + cache.GetMultipleCalls[i].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(fetchedIdIndexes[i].Select(o => ids[o]))); + } + + await (tx.CommitAsync(cancellationToken)); + } + } + + } +} diff --git a/src/NHibernate.Test/Async/CacheTest/BatchableCacheSubclassFixture.cs b/src/NHibernate.Test/Async/CacheTest/BatchableCacheSubclassFixture.cs new file mode 100644 index 00000000000..0c916dfa495 --- /dev/null +++ b/src/NHibernate.Test/Async/CacheTest/BatchableCacheSubclassFixture.cs @@ -0,0 +1,151 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using NHibernate.Cache; +using NHibernate.Cfg; +using NHibernate.DomainModel; +using NHibernate.Test.CacheTest.Caches; +using NUnit.Framework; + +namespace NHibernate.Test.CacheTest +{ + using System.Threading.Tasks; + [TestFixture] + public class BatchableCacheSubclassFixtureAsync : TestCase + { + protected override IList Mappings + { + get + { + return new string[] + { + "FooBar.hbm.xml", + "Baz.hbm.xml", + "Qux.hbm.xml", + "Glarch.hbm.xml", + "Fum.hbm.xml", + "Fumm.hbm.xml", + "Fo.hbm.xml", + "One.hbm.xml", + "Many.hbm.xml", + "Immutable.hbm.xml", + "Fee.hbm.xml", + "Vetoer.hbm.xml", + "Holder.hbm.xml", + "Location.hbm.xml", + "Stuff.hbm.xml", + "Container.hbm.xml", + "Simple.hbm.xml" + }; + } + } + + protected override void Configure(Configuration configuration) + { + configuration.SetProperty(Cfg.Environment.UseSecondLevelCache, "true"); + configuration.SetProperty(Cfg.Environment.UseQueryCache, "true"); + configuration.SetProperty(Cfg.Environment.CacheProvider, typeof(BatchableCacheProvider).AssemblyQualifiedName); + } + + protected override void OnSetUp() + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + FooProxy flast = new Bar(); + s.Save(flast); + for (int i = 0; i < 5; i++) + { + FooProxy foo = new Bar(); + s.Save(foo); + flast.TheFoo = foo; + flast = flast.TheFoo; + flast.String = "foo" + (i + 1); + } + tx.Commit(); + } + } + + protected override void OnTearDown() + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + s.Delete("from NHibernate.DomainModel.Foo as foo"); + tx.Commit(); + } + } + + [Test] + public async Task BatchableRootEntityTestAsync() + { + var persister = Sfi.GetEntityPersister(typeof(Foo).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + 
Assert.That(persister.Cache.Cache, Is.TypeOf()); + var fooCache = (BatchableCache) persister.Cache.Cache; + + persister = Sfi.GetEntityPersister(typeof(Bar).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var barCache = (BatchableCache) persister.Cache.Cache; + + Assert.That(barCache, Is.EqualTo(fooCache)); + + // Add Bar to cache + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var list = await (s.CreateQuery("from foo in class NHibernate.DomainModel.Foo").ListAsync()); + Assert.AreEqual(6, list.Count); + await (tx.CommitAsync()); + } + + Assert.That(fooCache.PutCalls, Has.Count.EqualTo(6)); // Bar is not batchable + Assert.That(fooCache.PutMultipleCalls, Has.Count.EqualTo(0)); + + // Batch fetch by two from cache + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var enumerator = + (await (s.CreateQuery("from foo in class NHibernate.DomainModel.Foo order by foo.String").EnumerableAsync())).GetEnumerator(); + var i = 1; + while (enumerator.MoveNext()) + { + BarProxy bar = (BarProxy) enumerator.Current; + if (i % 2 == 0) + { + string theString = bar.String; // Load the entity + } + i++; + } + await (tx.CommitAsync()); + } + + Assert.That(fooCache.GetMultipleCalls, Has.Count.EqualTo(3)); + + // Check that each key was used only once when retriving objects from the cache + var uniqueKeys = new HashSet(); + foreach (var keys in fooCache.GetMultipleCalls) + { + Assert.That(keys, Has.Length.EqualTo(2)); + foreach (var key in keys.OfType().Select(o => (string) o.Key)) + { + Assert.That(uniqueKeys, Does.Not.Contains(key)); + uniqueKeys.Add(key); + } + } + } + } +} diff --git a/src/NHibernate.Test/Async/CacheTest/Caches/BatchableCache.cs b/src/NHibernate.Test/Async/CacheTest/Caches/BatchableCache.cs new file mode 100644 index 00000000000..808997b9323 --- /dev/null +++ b/src/NHibernate.Test/Async/CacheTest/Caches/BatchableCache.cs @@ -0,0 +1,162 
@@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using NHibernate.Cache; + +namespace NHibernate.Test.CacheTest.Caches +{ + public partial class BatchableCache : ICache, IBatchableCache + { + + public Task PutManyAsync(object[] keys, object[] values, CancellationToken cancellationToken) + { + try + { + PutMultipleCalls.Add(keys); + for (int i = 0; i < keys.Length; i++) + { + _hashtable[keys[i]] = values[i]; + } + return Task.CompletedTask; + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + public Task LockManyAsync(object[] keys, CancellationToken cancellationToken) + { + try + { + LockMultipleCalls.Add(keys); + return Task.FromResult(null); + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + public Task UnlockManyAsync(object[] keys, object lockValue, CancellationToken cancellationToken) + { + try + { + UnlockMultipleCalls.Add(keys); + return Task.CompletedTask; + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + #region ICache Members + + /// + public Task GetAsync(object key, CancellationToken cancellationToken) + { + try + { + GetCalls.Add(key); + return Task.FromResult(_hashtable[key]); + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + public Task GetManyAsync(object[] keys, CancellationToken cancellationToken) + { + try + { + GetMultipleCalls.Add(keys); + var result = new object[keys.Length]; + for (var i = 0; i < keys.Length; i++) + { + result[i] = _hashtable[keys[i]]; + } + return Task.FromResult(result); + } + 
catch (Exception ex) + { + return Task.FromException(ex); + } + } + + /// + public Task PutAsync(object key, object value, CancellationToken cancellationToken) + { + try + { + PutCalls.Add(key); + _hashtable[key] = value; + return Task.CompletedTask; + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + /// + public Task RemoveAsync(object key, CancellationToken cancellationToken) + { + try + { + _hashtable.Remove(key); + return Task.CompletedTask; + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + /// + /// A cancellation token that can be used to cancel the work + public Task ClearAsync(CancellationToken cancellationToken) + { + try + { + _hashtable.Clear(); + return Task.CompletedTask; + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + /// + public Task LockAsync(object key, CancellationToken cancellationToken) + { + return Task.CompletedTask; + // local cache, so we use synchronization + } + + /// + public Task UnlockAsync(object key, CancellationToken cancellationToken) + { + return Task.CompletedTask; + // local cache, so we use synchronization + } + + #endregion + } +} diff --git a/src/NHibernate.Test/CacheTest/BatchableCacheFixture.cs b/src/NHibernate.Test/CacheTest/BatchableCacheFixture.cs new file mode 100644 index 00000000000..84a679aa8d3 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/BatchableCacheFixture.cs @@ -0,0 +1,690 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Reflection; +using System.Text; +using System.Threading.Tasks; +using NHibernate.Cache; +using NHibernate.Cfg; +using NHibernate.DomainModel; +using NHibernate.Test.CacheTest.Caches; +using NUnit.Framework; +using Environment = NHibernate.Cfg.Environment; + +namespace NHibernate.Test.CacheTest +{ + [TestFixture] + public class BatchableCacheFixture : TestCase + { + protected override IList Mappings => new[] + { + "CacheTest.ReadOnly.hbm.xml", + 
"CacheTest.ReadWrite.hbm.xml" + }; + + protected override string MappingsAssembly => "NHibernate.Test"; + + protected override string CacheConcurrencyStrategy => null; + + protected override void Configure(Configuration configuration) + { + configuration.SetProperty(Environment.UseSecondLevelCache, "true"); + configuration.SetProperty(Environment.UseQueryCache, "true"); + configuration.SetProperty(Environment.CacheProvider, typeof(BatchableCacheProvider).AssemblyQualifiedName); + } + + protected override bool CheckDatabaseWasCleaned() + { + base.CheckDatabaseWasCleaned(); + return true; // We are unable to delete read-only items. + } + + protected override void OnSetUp() + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var totalItems = 6; + for (var i = 1; i <= totalItems; i++) + { + var parent = new ReadOnly + { + Name = $"Name{i}" + }; + for (var j = 1; j <= totalItems; j++) + { + var child = new ReadOnlyItem + { + Parent = parent + }; + parent.Items.Add(child); + } + s.Save(parent); + } + for (var i = 1; i <= totalItems; i++) + { + var parent = new ReadWrite + { + Name = $"Name{i}" + }; + for (var j = 1; j <= totalItems; j++) + { + var child = new ReadWriteItem + { + Parent = parent + }; + parent.Items.Add(child); + } + s.Save(parent); + } + tx.Commit(); + } + } + + protected override void OnTearDown() + { + using (var s = OpenSession()) + using (var tx = s.BeginTransaction()) + { + s.Delete("from ReadWrite"); + tx.Commit(); + } + } + + [Test] + public void MultipleGetReadOnlyCollectionTest() + { + var persister = Sfi.GetCollectionPersister($"{typeof(ReadOnly).FullName}.Items"); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = s.Query().ToList(); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => 
o.Id)); + tx.Commit(); + } + + // Batch size 5 + var testCases = new List>> + { + // When the cache is empty, GetMultiple will be called two times. One time in type + // DefaultInitializeCollectionEventListener and the other time in BatchingCollectionInitializer. + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3, 4}, // triggered by InitializeCollectionFromCache method of DefaultInitializeCollectionEventListener type + new[] {1, 2, 3, 4, 5}, // triggered by Initialize method of BatchingCollectionInitializer type + }, + new[] {0, 1, 2, 3, 4}, + null + ), + // When there are not enough uninitialized collections after the demanded one to fill the batch, + // the nearest before the demanded collection are added. + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2, 1}, + new[] {5, 3, 2, 1, 0}, + }, + new[] {1, 2, 3, 4, 5}, + null + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2, 1}, + new[] {4, 3, 2, 1, 0}, + }, + new[] {1, 2, 3, 4, 5}, + null + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3, 4} // 0 get assembled and no further processing is done + }, + null, + (i) => i % 2 == 0 // Cache all even indexes before loading + ), + new Tuple>( + 1, + new int[][] + { + new[] {1, 2, 3, 4, 5}, // 2 and 4 get assembled inside InitializeCollectionFromCache + new[] {3, 5, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2, 1}, // 4 and 2 get assembled inside InitializeCollectionFromCache + new[] {3, 1, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3, 4}, // 1 and 3 get assembled inside InitializeCollectionFromCache + new[] {2, 4, 5} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ), + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2, 1}, // 5, 3 and 1 get assembled inside InitializeCollectionFromCache + new[] {2, 0} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ) + }; + + foreach (var tuple in testCases) + { + 
AssertMultipleCacheCollectionCalls(ids, tuple.Item1, tuple.Item2, tuple.Item3, tuple.Item4); + } + } + + [Test] + public void MultipleGetReadOnlyTest() + { + var persister = Sfi.GetEntityPersister(typeof(ReadOnly).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = s.Query().ToList(); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + tx.Commit(); + } + // Batch size 3 + var parentTestCases = new List>> + { + // When the cache is empty, GetMultiple will be called two times. One time in type + // DefaultLoadEventListener and the other time in BatchingEntityLoader. + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2}, // triggered by LoadFromSecondLevelCache method of DefaultLoadEventListener type + new[] {1, 2, 3}, // triggered by Load method of BatchingEntityLoader type + }, + new[] {0, 1, 2}, + null + ), + // When there are not enough uninitialized entities after the demanded one to fill the batch, + // the nearest before the demanded entity are added. 
+ new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3}, + new[] {5, 3, 2}, + }, + new[] {3, 4, 5}, + null + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3}, + new[] {4, 3, 2}, + }, + new[] {3, 4, 5}, + null + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2} // 0 get assembled and no further processing is done + }, + null, + (i) => i % 2 == 0 // Cache all even indexes before loading + ), + new Tuple>( + 1, + new int[][] + { + new[] {1, 2, 3}, // 2 gets assembled inside LoadFromSecondLevelCache + new[] {3, 4, 5} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3}, // 4 gets assembled inside LoadFromSecondLevelCache + new[] {3, 2, 1} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2}, // 1 gets assembled inside LoadFromSecondLevelCache + new[] {2, 3, 4} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ), + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3}, // 5 and 3 get assembled inside LoadFromSecondLevelCache + new[] {2, 1, 0} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ) + }; + + foreach (var tuple in parentTestCases) + { + AssertMultipleCacheCalls(ids, tuple.Item1, tuple.Item2, tuple.Item3, tuple.Item4); + } + } + + [Test] + public void MultipleGetReadOnlyItemTest() + { + var persister = Sfi.GetEntityPersister(typeof(ReadOnlyItem).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = s.Query().Take(6).ToList(); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + tx.Commit(); + } + // Batch size 4 + var parentTestCases = new List>> + { + // When the cache is empty, GetMultiple will be called two times. One time in type + // DefaultLoadEventListener and the other time in BatchingEntityLoader. 
+ new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3}, // triggered by LoadFromSecondLevelCache method of DefaultLoadEventListener type + new[] {1, 2, 3, 4}, // triggered by Load method of BatchingEntityLoader type + }, + new[] {0, 1, 2, 3}, + null + ), + // When there are not enough uninitialized entities after the demanded one to fill the batch, + // the nearest before the demanded entity are added. + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2}, + new[] {5, 3, 2, 1}, + }, + new[] {2, 3, 4, 5}, + null + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2}, + new[] {4, 3, 2, 1}, + }, + new[] {2, 3, 4, 5}, + null + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3} // 0 get assembled and no further processing is done + }, + null, + (i) => i % 2 == 0 // Cache all even indexes before loading + ), + new Tuple>( + 1, + new int[][] + { + new[] {1, 2, 3, 4}, // 2 and 4 get assembled inside LoadFromSecondLevelCache + new[] {3, 5, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 5, + new int[][] + { + new[] {5, 4, 3, 2}, // 4 and 2 get assembled inside LoadFromSecondLevelCache + new[] {3, 1, 0} + }, + new[] {1, 3, 5}, + (i) => i % 2 == 0 + ), + new Tuple>( + 0, + new int[][] + { + new[] {0, 1, 2, 3}, // 1 and 3 get assembled inside LoadFromSecondLevelCache + new[] {2, 4, 5} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ), + new Tuple>( + 4, + new int[][] + { + new[] {4, 5, 3, 2}, // 5 and 3 get assembled inside LoadFromSecondLevelCache + new[] {2, 1, 0} + }, + new[] {0, 2, 4}, + (i) => i % 2 != 0 + ) + }; + + foreach (var tuple in parentTestCases) + { + AssertMultipleCacheCalls(ids, tuple.Item1, tuple.Item2, tuple.Item3, tuple.Item4); + } + } + + [Test] + public void MultiplePutReadWriteTest() + { + var persister = Sfi.GetEntityPersister(typeof(ReadWrite).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + 
var ids = new List(); + + cache.Clear(); + cache.ClearStatistics(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = s.Query().ToList(); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + tx.Commit(); + } + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(2)); + + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2}, + new[] {3, 4, 5} + }, + cache.PutMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2}, + new[] {3, 4, 5} + }, + cache.LockMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2}, + new[] {3, 4, 5} + }, + cache.UnlockMultipleCalls + ); + } + + [Test] + public void MultiplePutReadWriteItemTest() + { + var persister = Sfi.GetCollectionPersister($"{typeof(ReadWrite).FullName}.Items"); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var cache = (BatchableCache) persister.Cache.Cache; + var ids = new List(); + + cache.Clear(); + cache.ClearStatistics(); + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var items = s.Query().ToList(); + ids.AddRange(items.OrderBy(o => o.Id).Select(o => o.Id)); + + // Initialize the first item collection + NHibernateUtil.Initialize(items.First(o => o.Id == ids[0]).Items); + tx.Commit(); + } + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + // Called in: DefaultInitializeCollectionEventListener, BatchingCollectionInitializer and ReadWriteCache + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(3)); + + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2, 3, 4} + }, + cache.PutMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2, 3, 4} + }, + cache.LockMultipleCalls + ); + AssertEquivalent( + ids, + new int[][] + { + new[] {0, 1, 2, 3, 4} + }, + cache.UnlockMultipleCalls + ); + } + + [Test] + public void 
UpdateTimestampsCacheTest() + { + var timestamp = Sfi.UpdateTimestampsCache; + var field = typeof(UpdateTimestampsCache).GetField( + "_batchUpdateTimestamps", + BindingFlags.NonPublic | BindingFlags.Instance); + Assert.That(field, Is.Not.Null); + var cache = (BatchableCache) field.GetValue(timestamp); + Assert.That(cache, Is.Not.Null); + + using (var s = OpenSession()) + { + const string query = "from ReadOnly e where e.Name = :name"; + const string name = "Name1"; + s + .CreateQuery(query) + .SetString("name", name) + .SetCacheable(true) + .UniqueResult(); + + // Run a second time, just to test the query cache + var result = s + .CreateQuery(query) + .SetString("name", name) + .SetCacheable(true) + .UniqueResult(); + + Assert.That(result, Is.Not.Null); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(1)); + Assert.That(cache.GetCalls, Has.Count.EqualTo(0)); + } + } + + private void AssertMultipleCacheCalls(List ids, int idIndex, int[][] fetchedIdIndexes, int[] putIdIndexes, Func cacheBeforeLoadFn = null) + where TEntity : CacheEntity + { + var persister = Sfi.GetEntityPersister(typeof(TEntity).FullName); + var cache = (BatchableCache) persister.Cache.Cache; + cache.Clear(); + + if (cacheBeforeLoadFn != null) + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + foreach (var id in ids.Where((o, i) => cacheBeforeLoadFn(i))) + { + s.Get(id); + } + tx.Commit(); + } + } + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + cache.ClearStatistics(); + + foreach (var id in ids) + { + s.Load(id); + } + var item = s.Get(ids[idIndex]); + Assert.That(item, Is.Not.Null); + Assert.That(cache.GetCalls, Has.Count.EqualTo(0)); + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(fetchedIdIndexes.GetLength(0))); + if (putIdIndexes == null) + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(0)); + } + else + { + Assert.That(cache.PutMultipleCalls, 
Has.Count.EqualTo(1)); + Assert.That( + cache.PutMultipleCalls[0].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(putIdIndexes.Select(o => ids[o]))); + } + + for (int i = 0; i < fetchedIdIndexes.GetLength(0); i++) + { + Assert.That( + cache.GetMultipleCalls[i].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(fetchedIdIndexes[i].Select(o => ids[o]))); + } + + tx.Commit(); + } + } + + private void AssertEquivalent(List ids, int[][] expectedIdIndexes, List result) + { + Assert.That(result, Has.Count.EqualTo(expectedIdIndexes.GetLength(0))); + for (int i = 0; i < expectedIdIndexes.GetLength(0); i++) + { + Assert.That( + result[i].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(expectedIdIndexes[i].Select(o => ids[o]))); + } + } + + private void AssertMultipleCacheCollectionCalls(List ids, int idIndex, int[][] fetchedIdIndexes, int[] putIdIndexes, Func cacheBeforeLoadFn = null) + { + var persister = Sfi.GetCollectionPersister($"{typeof(ReadOnly).FullName}.Items"); + var cache = (BatchableCache) persister.Cache.Cache; + cache.Clear(); + + if (cacheBeforeLoadFn != null) + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + foreach (var id in ids.Where((o, i) => cacheBeforeLoadFn(i))) + { + var item = s.Get(id); + NHibernateUtil.Initialize(item.Items); + } + tx.Commit(); + } + } + + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + cache.ClearStatistics(); + + foreach (var id in ids) + { + s.Get(id); + } + var item = s.Get(ids[idIndex]); + Assert.That(item, Is.Not.Null); + NHibernateUtil.Initialize(item.Items); + Assert.That(cache.GetCalls, Has.Count.EqualTo(0)); + Assert.That(cache.PutCalls, Has.Count.EqualTo(0)); + Assert.That(cache.GetMultipleCalls, Has.Count.EqualTo(fetchedIdIndexes.GetLength(0))); + if (putIdIndexes == null) + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(0)); + } + else + { + Assert.That(cache.PutMultipleCalls, Has.Count.EqualTo(1)); + Assert.That( + 
cache.PutMultipleCalls[0].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(putIdIndexes.Select(o => ids[o]))); + } + + for (int i = 0; i < fetchedIdIndexes.GetLength(0); i++) + { + Assert.That( + cache.GetMultipleCalls[i].OfType().Select(o => (int) o.Key), + Is.EquivalentTo(fetchedIdIndexes[i].Select(o => ids[o]))); + } + + tx.Commit(); + } + } + + } +} diff --git a/src/NHibernate.Test/CacheTest/BatchableCacheSubclassFixture.cs b/src/NHibernate.Test/CacheTest/BatchableCacheSubclassFixture.cs new file mode 100644 index 00000000000..5f2024049a5 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/BatchableCacheSubclassFixture.cs @@ -0,0 +1,140 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using NHibernate.Cache; +using NHibernate.Cfg; +using NHibernate.DomainModel; +using NHibernate.Test.CacheTest.Caches; +using NUnit.Framework; + +namespace NHibernate.Test.CacheTest +{ + [TestFixture] + public class BatchableCacheSubclassFixture : TestCase + { + protected override IList Mappings + { + get + { + return new string[] + { + "FooBar.hbm.xml", + "Baz.hbm.xml", + "Qux.hbm.xml", + "Glarch.hbm.xml", + "Fum.hbm.xml", + "Fumm.hbm.xml", + "Fo.hbm.xml", + "One.hbm.xml", + "Many.hbm.xml", + "Immutable.hbm.xml", + "Fee.hbm.xml", + "Vetoer.hbm.xml", + "Holder.hbm.xml", + "Location.hbm.xml", + "Stuff.hbm.xml", + "Container.hbm.xml", + "Simple.hbm.xml" + }; + } + } + + protected override void Configure(Configuration configuration) + { + configuration.SetProperty(Cfg.Environment.UseSecondLevelCache, "true"); + configuration.SetProperty(Cfg.Environment.UseQueryCache, "true"); + configuration.SetProperty(Cfg.Environment.CacheProvider, typeof(BatchableCacheProvider).AssemblyQualifiedName); + } + + protected override void OnSetUp() + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + FooProxy flast = new Bar(); + s.Save(flast); + for (int i = 0; i < 5; i++) + { + FooProxy foo = new Bar(); + 
s.Save(foo); + flast.TheFoo = foo; + flast = flast.TheFoo; + flast.String = "foo" + (i + 1); + } + tx.Commit(); + } + } + + protected override void OnTearDown() + { + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + s.Delete("from NHibernate.DomainModel.Foo as foo"); + tx.Commit(); + } + } + + [Test] + public void BatchableRootEntityTest() + { + var persister = Sfi.GetEntityPersister(typeof(Foo).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var fooCache = (BatchableCache) persister.Cache.Cache; + + persister = Sfi.GetEntityPersister(typeof(Bar).FullName); + Assert.That(persister.Cache.Cache, Is.Not.Null); + Assert.That(persister.Cache.Cache, Is.TypeOf()); + var barCache = (BatchableCache) persister.Cache.Cache; + + Assert.That(barCache, Is.EqualTo(fooCache)); + + // Add Bar to cache + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var list = s.CreateQuery("from foo in class NHibernate.DomainModel.Foo").List(); + Assert.AreEqual(6, list.Count); + tx.Commit(); + } + + Assert.That(fooCache.PutCalls, Has.Count.EqualTo(6)); // Bar is not batchable + Assert.That(fooCache.PutMultipleCalls, Has.Count.EqualTo(0)); + + // Batch fetch by two from cache + using (var s = Sfi.OpenSession()) + using (var tx = s.BeginTransaction()) + { + var enumerator = + s.CreateQuery("from foo in class NHibernate.DomainModel.Foo order by foo.String").Enumerable().GetEnumerator(); + var i = 1; + while (enumerator.MoveNext()) + { + BarProxy bar = (BarProxy) enumerator.Current; + if (i % 2 == 0) + { + string theString = bar.String; // Load the entity + } + i++; + } + tx.Commit(); + } + + Assert.That(fooCache.GetMultipleCalls, Has.Count.EqualTo(3)); + + // Check that each key was used only once when retriving objects from the cache + var uniqueKeys = new HashSet(); + foreach (var keys in fooCache.GetMultipleCalls) + { + Assert.That(keys, Has.Length.EqualTo(2)); + 
foreach (var key in keys.OfType().Select(o => (string) o.Key)) + { + Assert.That(uniqueKeys, Does.Not.Contains(key)); + uniqueKeys.Add(key); + } + } + } + } +} diff --git a/src/NHibernate.Test/CacheTest/Caches/BatchableCache.cs b/src/NHibernate.Test/CacheTest/Caches/BatchableCache.cs new file mode 100644 index 00000000000..e19985f1422 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/Caches/BatchableCache.cs @@ -0,0 +1,132 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using NHibernate.Cache; + +namespace NHibernate.Test.CacheTest.Caches +{ + public partial class BatchableCache : ICache, IBatchableCache + { + private readonly IDictionary _hashtable = new Hashtable(); + + public List GetMultipleCalls { get; } = new List(); + + public List PutMultipleCalls { get; } = new List(); + + public List LockMultipleCalls { get; } = new List(); + + public List UnlockMultipleCalls { get; } = new List(); + + public List GetCalls { get; } = new List(); + + public List PutCalls { get; } = new List(); + + public void PutMany(object[] keys, object[] values) + { + PutMultipleCalls.Add(keys); + for (int i = 0; i < keys.Length; i++) + { + _hashtable[keys[i]] = values[i]; + } + } + + public object LockMany(object[] keys) + { + LockMultipleCalls.Add(keys); + return null; + } + + public void UnlockMany(object[] keys, object lockValue) + { + UnlockMultipleCalls.Add(keys); + } + + #region ICache Members + + public BatchableCache(string regionName) + { + RegionName = regionName; + } + + /// + public object Get(object key) + { + GetCalls.Add(key); + return _hashtable[key]; + } + + public object[] GetMany(object[] keys) + { + GetMultipleCalls.Add(keys); + var result = new object[keys.Length]; + for (var i = 0; i < keys.Length; i++) + { + result[i] = _hashtable[keys[i]]; + } + return result; + } + + /// + public void Put(object key, object value) + { + 
PutCalls.Add(key); + _hashtable[key] = value; + } + + /// + public void Remove(object key) + { + _hashtable.Remove(key); + } + + /// + public void Clear() + { + _hashtable.Clear(); + } + + public void ClearStatistics() + { + GetCalls.Clear(); + GetMultipleCalls.Clear(); + PutMultipleCalls.Clear(); + PutCalls.Clear(); + UnlockMultipleCalls.Clear(); + LockMultipleCalls.Clear(); + } + + /// + public void Destroy() + { + } + + /// + public void Lock(object key) + { + // local cache, so we use synchronization + } + + /// + public void Unlock(object key) + { + // local cache, so we use synchronization + } + + /// + public long NextTimestamp() + { + return Timestamper.Next(); + } + + /// + public int Timeout => Timestamper.OneMs * 60000; + + public string RegionName { get; } + + #endregion + } +} diff --git a/src/NHibernate.Test/CacheTest/Caches/BatchableCacheProvider.cs b/src/NHibernate.Test/CacheTest/Caches/BatchableCacheProvider.cs new file mode 100644 index 00000000000..aef5bd5a7c6 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/Caches/BatchableCacheProvider.cs @@ -0,0 +1,34 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; +using NHibernate.Cache; + +namespace NHibernate.Test.CacheTest.Caches +{ + public class BatchableCacheProvider : ICacheProvider + { + #region ICacheProvider Members + + public ICache BuildCache(string regionName, IDictionary properties) + { + return new BatchableCache(regionName); + } + + public long NextTimestamp() + { + return Timestamper.Next(); + } + + public void Start(IDictionary properties) + { + } + + public void Stop() + { + } + + #endregion + } +} diff --git a/src/NHibernate.Test/CacheTest/ReadOnly.cs b/src/NHibernate.Test/CacheTest/ReadOnly.cs new file mode 100644 index 00000000000..c509e0cc2ec --- /dev/null +++ b/src/NHibernate.Test/CacheTest/ReadOnly.cs @@ -0,0 +1,25 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; 
+using System.Threading.Tasks; + +namespace NHibernate.Test.CacheTest +{ + public class ReadOnly : CacheEntity + { + public virtual string Name { get; set; } + + public virtual ISet Items { get; set; } = new HashSet(); + } + + public class ReadOnlyItem : CacheEntity + { + public virtual ReadOnly Parent { get; set; } + } + + public abstract class CacheEntity + { + public virtual int Id { get; protected set; } + } +} diff --git a/src/NHibernate.Test/CacheTest/ReadOnly.hbm.xml b/src/NHibernate.Test/CacheTest/ReadOnly.hbm.xml new file mode 100644 index 00000000000..10f70f3a7a4 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/ReadOnly.hbm.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/NHibernate.Test/CacheTest/ReadWrite.cs b/src/NHibernate.Test/CacheTest/ReadWrite.cs new file mode 100644 index 00000000000..f08added0e2 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/ReadWrite.cs @@ -0,0 +1,20 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace NHibernate.Test.CacheTest +{ + public class ReadWrite : CacheEntity + { + public virtual string Name { get; set; } + + public virtual ISet Items { get; set; } = new HashSet(); + } + + public class ReadWriteItem : CacheEntity + { + public virtual ReadWrite Parent { get; set; } + } +} diff --git a/src/NHibernate.Test/CacheTest/ReadWrite.hbm.xml b/src/NHibernate.Test/CacheTest/ReadWrite.hbm.xml new file mode 100644 index 00000000000..c035e40f915 --- /dev/null +++ b/src/NHibernate.Test/CacheTest/ReadWrite.hbm.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/NHibernate/Async/Cache/AbstractCacheBatch.cs b/src/NHibernate/Async/Cache/AbstractCacheBatch.cs new file mode 100644 index 00000000000..c019130ef65 --- /dev/null +++ b/src/NHibernate/Async/Cache/AbstractCacheBatch.cs @@ -0,0 +1,47 @@ 
+//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. +// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections.Generic; +using System.Text; +using NHibernate.Engine; + +namespace NHibernate.Cache +{ + using System.Threading.Tasks; + using System.Threading; + internal abstract partial class AbstractCacheBatch + { + + public abstract Task ExecuteAsync(CancellationToken cancellationToken); + } + + internal abstract partial class AbstractCacheBatch : AbstractCacheBatch + { + + public override sealed Task ExecuteAsync(CancellationToken cancellationToken) + { + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + try + { + return ExecuteAsync(_batch.ToArray(), cancellationToken); + } + catch (Exception ex) + { + return Task.FromException(ex); + } + } + + protected abstract Task ExecuteAsync(TData[] data, CancellationToken cancellationToken); + } +} diff --git a/src/NHibernate/Async/Cache/CacheBatcher.cs b/src/NHibernate/Async/Cache/CacheBatcher.cs new file mode 100644 index 00000000000..bce22267614 --- /dev/null +++ b/src/NHibernate/Async/Cache/CacheBatcher.cs @@ -0,0 +1,104 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using NHibernate.Cache.Access; +using NHibernate.Engine; +using NHibernate.Persister.Collection; +using NHibernate.Persister.Entity; + +namespace NHibernate.Cache +{ + using System.Threading.Tasks; + using System.Threading; + internal partial class CacheBatcher + { + + /// + /// Adds a put operation to the batch. If the batch size reached the persister batch + /// size, the batch will be executed. + /// + /// The entity persister. + /// The data to put in the cache. + /// A cancellation token that can be used to cancel the work + public async Task AddToBatchAsync(IEntityPersister persister, CachePutData data, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + if (ShouldExecuteBatch(persister, _putBatch)) + { + await (ExecuteBatchAsync(cancellationToken)).ConfigureAwait(false); + _currentPersister = persister; + _currentBatch = _putBatch = new CachePutBatch(_session, persister.Cache); + } + if (Log.IsDebugEnabled()) + { + Log.Debug("Adding a put operation to batch for entity {0} and key {1}", persister.EntityName, data.Key); + } + _putBatch.Add(data); + } + + /// + /// Adds a put operation to the batch. If the batch size reached the persister batch + /// size, the batch will be executed. + /// + /// The collection persister. + /// The data to put in the cache. 
+ /// A cancellation token that can be used to cancel the work + public async Task AddToBatchAsync(ICollectionPersister persister, CachePutData data, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + if (ShouldExecuteBatch(persister, _putBatch)) + { + await (ExecuteBatchAsync(cancellationToken)).ConfigureAwait(false); + _currentPersister = persister; + _currentBatch = _putBatch = new CachePutBatch(_session, persister.Cache); + } + if (Log.IsDebugEnabled()) + { + Log.Debug("Adding a put operation to batch for collection role {0} and key {1}", persister.Role, data.Key); + } + _putBatch.Add(data); + } + + /// + /// Executes the current batch. + /// + /// A cancellation token that can be used to cancel the work + public async Task ExecuteBatchAsync(CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + if (_currentBatch == null || _currentBatch.BatchSize == 0) + { + return; + } + + try + { + Stopwatch duration = null; + if (Log.IsDebugEnabled()) + { + duration = Stopwatch.StartNew(); + } + await (_currentBatch.ExecuteAsync(cancellationToken)).ConfigureAwait(false); + if (Log.IsDebugEnabled() && duration != null) + { + Log.Debug("ExecuteBatch for {0} keys took {1} ms", _currentBatch.BatchSize, duration.ElapsedMilliseconds); + } + } + finally + { + Cleanup(); + } + } + } +} diff --git a/src/NHibernate/Async/Cache/CachePutBatch.cs b/src/NHibernate/Async/Cache/CachePutBatch.cs new file mode 100644 index 00000000000..1d6705cfe95 --- /dev/null +++ b/src/NHibernate/Async/Cache/CachePutBatch.cs @@ -0,0 +1,55 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Linq; +using System.Collections; +using System.Collections.Generic; +using System.Text; +using NHibernate.Engine; + +namespace NHibernate.Cache +{ + using System.Threading.Tasks; + using System.Threading; + internal partial class CachePutBatch : AbstractCacheBatch + { + + protected override async Task ExecuteAsync(CachePutData[] data, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + var length = data.Length; + var keys = new CacheKey[length]; + var values = new object[length]; + var versions = new object[length]; + var versionComparers = new IComparer[length]; + var minimalPuts = new bool[length]; + + for (int i = 0; i < length; i++) + { + var item = data[i]; + keys[i] = item.Key; + values[i] = item.Value; + versions[i] = item.Version; + versionComparers[i] = item.VersionComparer; + minimalPuts[i] = item.MinimalPut; + } + + var factory = Session.Factory; + var cacheStrategy = CacheConcurrencyStrategy; + var puts = await (cacheStrategy.PutManyAsync(keys, values, Session.Timestamp, versions, versionComparers, minimalPuts, cancellationToken)).ConfigureAwait(false); + + if (factory.Statistics.IsStatisticsEnabled && puts.Any(o => o)) + { + factory.StatisticsImplementor.SecondLevelCachePut(cacheStrategy.RegionName); + } + } + } +} diff --git a/src/NHibernate/Async/Cache/IBatchableCache.cs b/src/NHibernate/Async/Cache/IBatchableCache.cs new file mode 100644 index 00000000000..f52892a2569 --- /dev/null +++ b/src/NHibernate/Async/Cache/IBatchableCache.cs @@ -0,0 +1,45 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections.Generic; +using System.Text; + +namespace NHibernate.Cache +{ + using System.Threading.Tasks; + using System.Threading; + public partial interface IBatchableCache : IBatchableReadOnlyCache + { + /// + /// Add multiple objects to the cache. + /// + /// The keys to cache. + /// The objects to cache. + /// A cancellation token that can be used to cancel the work + Task PutManyAsync(object[] keys, object[] values, CancellationToken cancellationToken); + + /// + /// Lock the objects from being changed by another thread. + /// + /// The keys to lock. + /// A cancellation token that can be used to cancel the work + /// The value that was used to lock the keys. + Task LockManyAsync(object[] keys, CancellationToken cancellationToken); + + /// + /// Unlock the objects that were previously locked. + /// + /// The keys to unlock. + /// The value that was used to lock the keys. + /// A cancellation token that can be used to cancel the work + Task UnlockManyAsync(object[] keys, object lockValue, CancellationToken cancellationToken); + } +} diff --git a/src/NHibernate/Async/Cache/IBatchableCacheConcurrencyStrategy.cs b/src/NHibernate/Async/Cache/IBatchableCacheConcurrencyStrategy.cs new file mode 100644 index 00000000000..78a38e73d22 --- /dev/null +++ b/src/NHibernate/Async/Cache/IBatchableCacheConcurrencyStrategy.cs @@ -0,0 +1,47 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections; +using System.Text; +using NHibernate.Cache.Entry; + +namespace NHibernate.Cache +{ + using System.Threading.Tasks; + using System.Threading; + public partial interface IBatchableCacheConcurrencyStrategy : ICacheConcurrencyStrategy + { + /// + /// Attempt to retrieve multiple objects from the Cache + /// + /// The keys (id) of the objects to get out of the Cache. + /// A timestamp prior to the transaction start time + /// A cancellation token that can be used to cancel the work + /// An array of cached objects or + /// + Task GetManyAsync(CacheKey[] keys, long timestamp, CancellationToken cancellationToken); + + /// + /// Attempt to cache objects, after loading them from the database. + /// + /// The keys (id) of the objects to put in the Cache. + /// The objects to put in the cache. + /// A timestamp prior to the transaction start time. + /// The version numbers of the objects we are putting. + /// The comparers to be used to compare version numbers + /// Indicates that the cache should avoid a put if the item is already cached. + /// A cancellation token that can be used to cancel the work + /// if the objects were successfully cached. + /// + Task PutManyAsync(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts, CancellationToken cancellationToken); + } +} diff --git a/src/NHibernate/Async/Cache/IBatchableReadOnlyCache.cs b/src/NHibernate/Async/Cache/IBatchableReadOnlyCache.cs new file mode 100644 index 00000000000..750c5df8280 --- /dev/null +++ b/src/NHibernate/Async/Cache/IBatchableReadOnlyCache.cs @@ -0,0 +1,29 @@ +//------------------------------------------------------------------------------ +// +// This code was generated by AsyncGenerator. +// +// Changes to this file may cause incorrect behavior and will be lost if +// the code is regenerated. 
+// +//------------------------------------------------------------------------------ + + +using System; +using System.Collections.Generic; +using System.Text; + +namespace NHibernate.Cache +{ + using System.Threading.Tasks; + using System.Threading; + public partial interface IBatchableReadOnlyCache + { + /// + /// Get multiple objects from the cache. + /// + /// The keys to be retrieved from the cache. + /// A cancellation token that can be used to cancel the work + /// + Task GetManyAsync(object[] keys, CancellationToken cancellationToken); + } +} diff --git a/src/NHibernate/Async/Cache/ICacheConcurrencyStrategy.cs b/src/NHibernate/Async/Cache/ICacheConcurrencyStrategy.cs index 0d843204aad..5474e15a68b 100644 --- a/src/NHibernate/Async/Cache/ICacheConcurrencyStrategy.cs +++ b/src/NHibernate/Async/Cache/ICacheConcurrencyStrategy.cs @@ -8,6 +8,7 @@ //------------------------------------------------------------------------------ +using System; using System.Collections; using NHibernate.Cache.Access; using NHibernate.Cache.Entry; @@ -121,4 +122,56 @@ public partial interface ICacheConcurrencyStrategy /// Task ClearAsync(CancellationToken cancellationToken); } -} \ No newline at end of file + + internal static partial class CacheConcurrencyStrategyExtensions + { + /// + /// Attempt to retrieve multiple objects from the Cache + /// + /// The cache concurrency strategy. + /// The keys (id) of the objects to get out of the Cache. 
+ /// A timestamp prior to the transaction start time + /// A cancellation token that can be used to cancel the work + /// An array of cached objects or + /// + public static Task GetManyAsync(this ICacheConcurrencyStrategy cache, CacheKey[] keys, long timestamp, CancellationToken cancellationToken) + { + if (!(cache is IBatchableCacheConcurrencyStrategy batchableCache)) + { + throw new InvalidOperationException($"Cache concurrency strategy {cache.GetType()} does not support batching"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return batchableCache.GetManyAsync(keys, timestamp, cancellationToken); + } + + /// + /// Attempt to cache objects, after loading them from the database. + /// + /// The cache concurrency strategy. + /// The keys (id) of the objects to put in the Cache. + /// The objects to put in the cache. + /// A timestamp prior to the transaction start time. + /// The version numbers of the objects we are putting. + /// The comparers to be used to compare version numbers + /// Indicates that the cache should avoid a put if the item is already cached. + /// A cancellation token that can be used to cancel the work + /// if the objects were successfully cached. 
+ /// + public static Task PutManyAsync(this ICacheConcurrencyStrategy cache, CacheKey[] keys, object[] values, long timestamp, + object[] versions, IComparer[] versionComparers, bool[] minimalPuts, CancellationToken cancellationToken) + { + if (!(cache is IBatchableCacheConcurrencyStrategy batchableCache)) + { + throw new InvalidOperationException($"Cache concurrency strategy {cache.GetType()} does not support batching"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return batchableCache.PutManyAsync(keys, values, timestamp, versions, versionComparers, minimalPuts, cancellationToken); + } + } +} diff --git a/src/NHibernate/Async/Cache/NonstrictReadWriteCache.cs b/src/NHibernate/Async/Cache/NonstrictReadWriteCache.cs index 0fad6274303..f8a21aa2d88 100644 --- a/src/NHibernate/Async/Cache/NonstrictReadWriteCache.cs +++ b/src/NHibernate/Async/Cache/NonstrictReadWriteCache.cs @@ -10,13 +10,15 @@ using System; using System.Collections; +using System.Collections.Generic; +using System.Linq; using NHibernate.Cache.Access; namespace NHibernate.Cache { using System.Threading.Tasks; using System.Threading; - public partial class NonstrictReadWriteCache : ICacheConcurrencyStrategy + public partial class NonstrictReadWriteCache : IBatchableCacheConcurrencyStrategy { /// @@ -42,6 +44,110 @@ public async Task GetAsync(CacheKey key, long txTimestamp, CancellationT return result; } + public Task GetManyAsync(CacheKey[] keys, long timestamp, CancellationToken cancellationToken) + { + if (_batchableReadOnlyCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching get operation"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InternalGetManyAsync(); + async Task InternalGetManyAsync() + { + if (log.IsDebugEnabled()) + { + log.Debug("Cache lookup: {0}", string.Join(",", keys.AsEnumerable())); + } + 
var results = await (_batchableReadOnlyCache.GetManyAsync(keys.Select(o => (object) o).ToArray(), cancellationToken)).ConfigureAwait(false); + if (!log.IsDebugEnabled()) + { + return results; + } + for (var i = 0; i < keys.Length; i++) + { + log.Debug(results[i] != null ? $"Cache hit: {keys[i]}" : $"Cache miss: {keys[i]}"); + } + return results; + } + } + + /// + /// Add multiple items to the cache + /// + public Task PutManyAsync(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts, CancellationToken cancellationToken) + { + if (_batchableCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching operations"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InternalPutManyAsync(); + async Task InternalPutManyAsync() + { + var result = new bool[keys.Length]; + if (timestamp == long.MinValue) + { + // MinValue means cache is disabled + return result; + } + + var checkKeys = new List(); + var checkKeyIndexes = new List(); + for (var i = 0; i < minimalPuts.Length; i++) + { + if (minimalPuts[i]) + { + checkKeys.Add(keys[i]); + checkKeyIndexes.Add(i); + } + } + var skipKeyIndexes = new HashSet(); + if (checkKeys.Any()) + { + var objects = await (_batchableCache.GetManyAsync(checkKeys.ToArray(), cancellationToken)).ConfigureAwait(false); + for (var i = 0; i < objects.Length; i++) + { + if (objects[i] != null) + { + if (log.IsDebugEnabled()) + { + log.Debug("item already cached: {0}", checkKeys[i]); + } + skipKeyIndexes.Add(checkKeyIndexes[i]); + } + } + } + + if (skipKeyIndexes.Count == keys.Length) + { + return result; + } + + var putKeys = new object[keys.Length - skipKeyIndexes.Count]; + var putValues = new object[putKeys.Length]; + var j = 0; + for (var i = 0; i < keys.Length; i++) + { + if (skipKeyIndexes.Contains(i)) + { + continue; + } + putKeys[j] = keys[i]; + putValues[j++] = 
values[i]; + result[i] = true; + } + await (_batchableCache.PutManyAsync(putKeys, putValues, cancellationToken)).ConfigureAwait(false); + return result; + } + } + /// /// Add an item to the cache /// diff --git a/src/NHibernate/Async/Cache/ReadOnlyCache.cs b/src/NHibernate/Async/Cache/ReadOnlyCache.cs index c7692bc6ca5..14530333b71 100644 --- a/src/NHibernate/Async/Cache/ReadOnlyCache.cs +++ b/src/NHibernate/Async/Cache/ReadOnlyCache.cs @@ -10,13 +10,15 @@ using System; using System.Collections; +using System.Collections.Generic; +using System.Linq; using NHibernate.Cache.Access; namespace NHibernate.Cache { using System.Threading.Tasks; using System.Threading; - public partial class ReadOnlyCache : ICacheConcurrencyStrategy + public partial class ReadOnlyCache : IBatchableCacheConcurrencyStrategy { public async Task GetAsync(CacheKey key, long timestamp, CancellationToken cancellationToken) @@ -30,6 +32,36 @@ public async Task GetAsync(CacheKey key, long timestamp, CancellationTok return result; } + public Task GetManyAsync(CacheKey[] keys, long timestamp, CancellationToken cancellationToken) + { + if (_batchableReadOnlyCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching get operation"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InternalGetManyAsync(); + async Task InternalGetManyAsync() + { + if (log.IsDebugEnabled()) + { + log.Debug("Cache lookup: {0}", string.Join(",", keys.AsEnumerable())); + } + var results = await (_batchableReadOnlyCache.GetManyAsync(keys.Select(o => (object) o).ToArray(), cancellationToken)).ConfigureAwait(false); + if (!log.IsDebugEnabled()) + { + return results; + } + for (var i = 0; i < keys.Length; i++) + { + log.Debug(results[i] != null ? $"Cache hit: {keys[i]}" : $"Cache miss: {keys[i]}"); + } + return results; + } + } + /// /// Unsupported! 
/// @@ -49,6 +81,77 @@ public Task LockAsync(CacheKey key, object version, CancellationToken } } + public Task PutManyAsync(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts, CancellationToken cancellationToken) + { + if (_batchableCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching operations"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InternalPutManyAsync(); + async Task InternalPutManyAsync() + { + var result = new bool[keys.Length]; + if (timestamp == long.MinValue) + { + // MinValue means cache is disabled + return result; + } + + var checkKeys = new List(); + var checkKeyIndexes = new List(); + for (var i = 0; i < minimalPuts.Length; i++) + { + if (minimalPuts[i]) + { + checkKeys.Add(keys[i]); + checkKeyIndexes.Add(i); + } + } + var skipKeyIndexes = new HashSet(); + if (checkKeys.Any()) + { + var objects = await (_batchableCache.GetManyAsync(checkKeys.Select(o => (object) o).ToArray(), cancellationToken)).ConfigureAwait(false); + for (var i = 0; i < objects.Length; i++) + { + if (objects[i] != null) + { + if (log.IsDebugEnabled()) + { + log.Debug("item already cached: {0}", checkKeys[i]); + } + skipKeyIndexes.Add(checkKeyIndexes[i]); + } + } + } + + if (skipKeyIndexes.Count == keys.Length) + { + return result; + } + + var putKeys = new object[keys.Length - skipKeyIndexes.Count]; + var putValues = new object[putKeys.Length]; + var j = 0; + for (var i = 0; i < keys.Length; i++) + { + if (skipKeyIndexes.Contains(i)) + { + continue; + } + putKeys[j] = keys[i]; + putValues[j++] = values[i]; + result[i] = true; + } + await (_batchableCache.PutManyAsync(putKeys, putValues, cancellationToken)).ConfigureAwait(false); + return result; + } + } + public async Task PutAsync(CacheKey key, object value, long timestamp, object version, IComparer versionComparator, bool 
minimalPut, CancellationToken cancellationToken) { diff --git a/src/NHibernate/Async/Cache/ReadWriteCache.cs b/src/NHibernate/Async/Cache/ReadWriteCache.cs index 217ac12cada..4c83d96b122 100644 --- a/src/NHibernate/Async/Cache/ReadWriteCache.cs +++ b/src/NHibernate/Async/Cache/ReadWriteCache.cs @@ -10,13 +10,15 @@ using System; using System.Collections; +using System.Collections.Generic; +using System.Linq; using NHibernate.Cache.Access; namespace NHibernate.Cache { using System.Threading.Tasks; using System.Threading; - public partial class ReadWriteCache : ICacheConcurrencyStrategy + public partial class ReadWriteCache : IBatchableCacheConcurrencyStrategy { private readonly NHibernate.Util.AsyncLock _lockObjectAsync = new NHibernate.Util.AsyncLock(); @@ -88,6 +90,53 @@ public async Task GetAsync(CacheKey key, long txTimestamp, CancellationT } } + public Task GetManyAsync(CacheKey[] keys, long timestamp, CancellationToken cancellationToken) + { + if (_batchableReadOnlyCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching get operation"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InternalGetManyAsync(); + async Task InternalGetManyAsync() + { + if (log.IsDebugEnabled()) + { + log.Debug("Cache lookup: {0}", string.Join(",", keys.AsEnumerable())); + } + var result = new object[keys.Length]; + using (await _lockObjectAsync.LockAsync()) + { + var lockables = await (_batchableReadOnlyCache.GetManyAsync(keys.Select(o => (object) o).ToArray(), cancellationToken)).ConfigureAwait(false); + for (var i = 0; i < lockables.Length; i++) + { + var lockable = (ILockable) lockables[i]; + var gettable = lockable != null && lockable.IsGettable(timestamp); + + if (gettable) + { + if (log.IsDebugEnabled()) + { + log.Debug("Cache hit: {0}", keys[i]); + } + result[i] = ((CachedItem) lockable).Value; + } + + if (log.IsDebugEnabled()) + { + log.Debug(lockable == 
null ? "Cache miss: {0}" : "Cached item was locked: {0}", keys[i]); + } + + result[i] = null; + } + } + return result; + } + } + /// /// Stop any other transactions reading or writing this item to/from /// the cache. Send them straight to the database instead. (The lock @@ -124,6 +173,100 @@ public async Task LockAsync(CacheKey key, object version, Cancellatio } } + /// + /// Do not add an item to the cache unless the current transaction + /// timestamp is later than the timestamp at which the item was + /// invalidated. (Otherwise, a stale item might be re-added if the + /// database is operating in repeatable read isolation mode.) + /// + /// Whether the items were actually put into the cache + public Task PutManyAsync(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts, CancellationToken cancellationToken) + { + if (_batchableCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching operations"); + } + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InternalPutManyAsync(); + async Task InternalPutManyAsync() + { + + var result = new bool[keys.Length]; + if (timestamp == long.MinValue) + { + // MinValue means cache is disabled + return result; + } + + using (await _lockObjectAsync.LockAsync()) + { + if (log.IsDebugEnabled()) + { + log.Debug("Caching: {0}", string.Join(",", keys.AsEnumerable())); + } + var keysArr = keys.Cast().ToArray(); + var lockAquired = false; + object lockValue = null; + try + { + lockValue = await (_batchableCache.LockManyAsync(keysArr, cancellationToken)).ConfigureAwait(false); + lockAquired = true; + var putBatch = new Dictionary(); + var lockables = await (_batchableCache.GetManyAsync(keysArr, cancellationToken)).ConfigureAwait(false); + for (var i = 0; i < keys.Length; i++) + { + var key = keys[i]; + var version = versions[i]; + var lockable = (ILockable) 
lockables[i]; + bool puttable = lockable == null || + lockable.IsPuttable(timestamp, version, versionComparers[i]); + if (puttable) + { + putBatch.Add(key, new CachedItem(values[i], cache.NextTimestamp(), version)); + if (log.IsDebugEnabled()) + { + log.Debug("Cached: {0}", key); + } + result[i] = true; + } + else + { + if (log.IsDebugEnabled()) + { + if (lockable.IsLock) + { + log.Debug("Item was locked: {0}", key); + } + else + { + log.Debug("Item was already cached: {0}", key); + } + } + result[i] = false; + } + } + + if (putBatch.Count > 0) + { + await (_batchableCache.PutManyAsync(putBatch.Keys.ToArray(), putBatch.Values.ToArray(), cancellationToken)).ConfigureAwait(false); + } + } + finally + { + if (lockAquired) + { + await (_batchableCache.UnlockManyAsync(keysArr, lockValue, cancellationToken)).ConfigureAwait(false); + } + } + } + return result; + } + } + /// /// Do not add an item to the cache unless the current transaction /// timestamp is later than the timestamp at which the item was diff --git a/src/NHibernate/Async/Cache/UpdateTimestampsCache.cs b/src/NHibernate/Async/Cache/UpdateTimestampsCache.cs index 9a086f16333..843160a1824 100644 --- a/src/NHibernate/Async/Cache/UpdateTimestampsCache.cs +++ b/src/NHibernate/Async/Cache/UpdateTimestampsCache.cs @@ -114,35 +114,33 @@ public virtual async Task IsUpToDateAsync(ISet spaces, long timest cancellationToken.ThrowIfCancellationRequested(); using (await _isUpToDate.LockAsync()) { - foreach (string space in spaces) + if (_batchUpdateTimestamps != null) { - object lastUpdate = await (updateTimestamps.GetAsync(space, cancellationToken)).ConfigureAwait(false); - if (lastUpdate == null) + var keys = new object[spaces.Count]; + var index = 0; + foreach (var space in spaces) { - //the last update timestamp was lost from the cache - //(or there were no updates since startup!) 
- - //NOTE: commented out, since users found the "safe" behavior - // counter-intuitive when testing, and we couldn't deal - // with all the forum posts :-( - //updateTimestamps.put( space, new Long( updateTimestamps.nextTimestamp() ) ); - //result = false; // safer - - //OR: put a timestamp there, to avoid subsequent expensive - // lookups to a distributed cache - this is no good, since - // it is non-threadsafe (could hammer effect of an actual - // invalidation), and because this is not the way our - // preferred distributed caches work (they work by - // replication) - //updateTimestamps.put( space, new Long(Long.MIN_VALUE) ); + keys[index++] = space; } - else + var lastUpdates = await (_batchUpdateTimestamps.GetManyAsync(keys, cancellationToken)).ConfigureAwait(false); + foreach (var lastUpdate in lastUpdates) { - if ((long) lastUpdate >= timestamp) + if (IsOutdated(lastUpdate, timestamp)) { return false; } } + return true; + } + + foreach (string space in spaces) + { + object lastUpdate = await (updateTimestamps.GetAsync(space, cancellationToken)).ConfigureAwait(false); + if (IsOutdated(lastUpdate, timestamp)) + { + return false; + } + } return true; } diff --git a/src/NHibernate/Async/Engine/BatchFetchQueue.cs b/src/NHibernate/Async/Engine/BatchFetchQueue.cs index 470619da3d5..66ff2292419 100644 --- a/src/NHibernate/Async/Engine/BatchFetchQueue.cs +++ b/src/NHibernate/Async/Engine/BatchFetchQueue.cs @@ -8,6 +8,7 @@ //------------------------------------------------------------------------------ +using System; using System.Collections; using NHibernate.Cache; using NHibernate.Collection; @@ -15,6 +16,7 @@ using NHibernate.Persister.Entity; using NHibernate.Util; using System.Collections.Generic; +using System.Linq; using Iesi.Collections.Generic; namespace NHibernate.Engine @@ -32,67 +34,186 @@ public partial class BatchFetchQueue /// the maximum number of keys to return /// A cancellation token that can be used to cancel the work /// an array of collection 
keys, of length batchSize (padded with nulls) - public async Task GetCollectionBatchAsync(ICollectionPersister collectionPersister, object id, int batchSize, CancellationToken cancellationToken) + public Task GetCollectionBatchAsync(ICollectionPersister collectionPersister, object id, int batchSize, CancellationToken cancellationToken) + { + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return GetCollectionBatchAsync(collectionPersister, id, batchSize, true, null, cancellationToken); + } + + /// + /// Get a batch of uninitialized collection keys for a given role + /// + /// The persister for the collection role. + /// A key that must be included in the batch fetch + /// the maximum number of keys to return + /// Whether to check the cache for uninitialized collection keys. + /// An array that will be filled with collection entries if set. + /// A cancellation token that can be used to cancel the work + /// An array of collection keys, of length (padded with nulls) + internal async Task GetCollectionBatchAsync(ICollectionPersister collectionPersister, object id, int batchSize, bool checkCache, + CollectionEntry[] collectionEntries, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); - object[] keys = new object[batchSize]; - keys[0] = id; - int i = 1; - int end = -1; - bool checkForEnd = false; + var keys = new object[batchSize]; + keys[0] = id; // The first element of array is reserved for the actual instance we are loading + var i = 1; // The current index of keys array + int? keyIndex = null; // The index of the demanding key in the linked hash set + var checkForEnd = false; // Stores whether we found the demanded collection and reached the batchSize + var index = 0; // The current index of the linked hash set iteration + // List of collection entries that haven't been checked for their existence in the cache.
Besides the collection entry, + // the index where the entry was found is also stored in order to correctly order the returning keys. + var collectionKeys = new List, int>>(batchSize); + var batchableCache = collectionPersister.Cache?.Cache as IBatchableReadOnlyCache; + + if (!batchLoadableCollections.TryGetValue(collectionPersister.Role, out var map)) + { + return keys; + } - if (batchLoadableCollections.TryGetValue(collectionPersister.Role, out var map)) + foreach (KeyValuePair me in map) { - foreach (KeyValuePair me in map) + cancellationToken.ThrowIfCancellationRequested(); + if (await (ProcessKeyAsync(me)).ConfigureAwait(false)) { - var ce = me.Key; - var collection = me.Value; - if (ce.LoadedKey == null) - { - // the LoadedKey of the CollectionEntry might be null as it might have been reset to null - // (see for example Collections.ProcessDereferencedCollection() - // and CollectionEntry.AfterAction()) - // though we clear the queue on flush, it seems like a good idea to guard - // against potentially null LoadedKey:s - continue; - } + return keys; + } + index++; + } - if (collection.WasInitialized) + // If by the end of the iteration we haven't filled the whole array of keys to fetch, + // we have to check the remaining collection keys. + while (i != batchSize && collectionKeys.Count > 0) + { + cancellationToken.ThrowIfCancellationRequested(); + if (await (CheckCacheAndProcessResultAsync()).ConfigureAwait(false)) + { + return keys; + } + } + + return keys; //we ran out of keys to try + + // Calls the cache to check if any of the keys is cached and continues the key processing for those + // that are not stored in the cache. + async Task CheckCacheAndProcessResultAsync() + { + var fromIndex = batchableCache != null + ? 
collectionKeys.Count - Math.Min(batchSize, collectionKeys.Count) + : 0; + var toIndex = collectionKeys.Count - 1; + var indexes = GetSortedKeyIndexes(collectionKeys, keyIndex.Value, fromIndex, toIndex); + if (batchableCache == null) + { + for (var j = 0; j < collectionKeys.Count; j++) { - log.Warn("Encountered initialized collection in BatchFetchQueue, this should not happen."); - continue; + if (await (ProcessKeyAsync(collectionKeys[indexes[j]].Key)).ConfigureAwait(false)) + { + return true; + } } - - if (checkForEnd && i == end) + } + else + { + var results = await (AreCachedAsync(collectionKeys, indexes, collectionPersister, batchableCache, checkCache, cancellationToken)).ConfigureAwait(false); + var k = toIndex; + for (var j = 0; j < results.Length; j++) { - return keys; //the first key found after the given key + if (!results[j] && await (ProcessKeyAsync(collectionKeys[indexes[j]].Key, true)).ConfigureAwait(false)) + { + return true; + } } + } - bool isEqual = collectionPersister.KeyType.IsEqual(id, ce.LoadedKey, collectionPersister.Factory); + for (var j = toIndex; j >= fromIndex; j--) + { + collectionKeys.RemoveAt(j); + } + return false; + } - if (isEqual) + async Task ProcessKeyAsync(KeyValuePair me, bool ignoreCache = false) + { + var ce = me.Key; + var collection = me.Value; + if (ce.LoadedKey == null) + { + // the LoadedKey of the CollectionEntry might be null as it might have been reset to null + // (see for example Collections.ProcessDereferencedCollection() + // and CollectionEntry.AfterAction()) + // though we clear the queue on flush, it seems like a good idea to guard + // against potentially null LoadedKey:s + return false; + } + + if (collection.WasInitialized) + { + log.Warn("Encountered initialized collection in BatchFetchQueue, this should not happen."); + return false; + } + + if (checkForEnd && (index >= keyIndex.Value + batchSize || index == map.Count)) + { + return true; + } + if (collectionPersister.KeyType.IsEqual(id, ce.LoadedKey, 
collectionPersister.Factory)) + { + if (collectionEntries != null) { - end = i; - //checkForEnd = false; + collectionEntries[0] = ce; } - else if (!await (IsCachedAsync(ce.LoadedKey, collectionPersister, cancellationToken)).ConfigureAwait(false)) + keyIndex = index; + } + else if (!checkCache || batchableCache == null) + { + if (!keyIndex.HasValue || index < keyIndex.Value) { - keys[i++] = ce.LoadedKey; - //count++; + collectionKeys.Add(new KeyValuePair, int>(me, index)); + return false; } - if (i == batchSize) + if (!checkCache || !await (IsCachedAsync(ce.LoadedKey, collectionPersister, cancellationToken)).ConfigureAwait(false)) { - i = 1; //end of array, start filling again from start - if (end != -1) + if (collectionEntries != null) { - checkForEnd = true; + collectionEntries[i] = ce; } + keys[i++] = ce.LoadedKey; + } + } + else if (ignoreCache) + { + if (collectionEntries != null) + { + collectionEntries[i] = ce; + } + keys[i++] = ce.LoadedKey; + } + else + { + collectionKeys.Add(new KeyValuePair, int>(me, index)); + // Check the cache only when we have collected as many keys as are needed to fill the batch, + // that are after the demanded key. 
+ if (!keyIndex.HasValue || index < keyIndex.Value + batchSize) + { + return false; + } + return await (CheckCacheAndProcessResultAsync()).ConfigureAwait(false); + } + if (i == batchSize) + { + i = 1; // End of array, start filling again from start + if (keyIndex.HasValue) + { + checkForEnd = true; + return index >= keyIndex.Value + batchSize || index == map.Count; } } + return false; } - - return keys; //we ran out of keys to try } /// @@ -105,45 +226,157 @@ public async Task GetCollectionBatchAsync(ICollectionPersister collect /// The maximum number of keys to return /// A cancellation token that can be used to cancel the work /// an array of identifiers, of length batchSize (possibly padded with nulls) - public async Task GetEntityBatchAsync(IEntityPersister persister, object id, int batchSize, CancellationToken cancellationToken) + public Task GetEntityBatchAsync(IEntityPersister persister, object id, int batchSize, CancellationToken cancellationToken) + { + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return GetEntityBatchAsync(persister, id, batchSize, true, cancellationToken); + } + + /// + /// Get a batch of unloaded identifiers for this class, using a slightly + /// complex algorithm that tries to grab keys registered immediately after + /// the given key. + /// + /// The persister for the entities being loaded. + /// The identifier of the entity currently demanding load. + /// The maximum number of keys to return + /// Whether to check the cache for uninitialized keys. 
+ /// A cancellation token that can be used to cancel the work + /// An array of identifiers, of length (possibly padded with nulls) + internal async Task GetEntityBatchAsync(IEntityPersister persister, object id, int batchSize, bool checkCache, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); - object[] ids = new object[batchSize]; - ids[0] = id; //first element of array is reserved for the actual instance we are loading! - int i = 1; - int end = -1; - bool checkForEnd = false; + var ids = new object[batchSize]; + ids[0] = id; // The first element of array is reserved for the actual instance we are loading + var i = 1; // The current index of ids array + int? idIndex = null; // The index of the demanding id in the linked hash set + var checkForEnd = false; // Stores whether we found the demanded id and reached the batchSize + var index = 0; // The current index of the linked hash set iteration + // List of entity keys that haven't been checked for their existance in the cache. Besides the entity key, + // the index where the key was found is also stored in order to correctly order the returning keys. + var entityKeys = new List>(batchSize); + var batchableCache = persister.Cache?.Cache as IBatchableReadOnlyCache; + + if (!batchLoadableEntityKeys.TryGetValue(persister.EntityName, out var set)) + { + return ids; + } - if (batchLoadableEntityKeys.TryGetValue(persister.EntityName, out var set)) + foreach (var key in set) { - foreach (var key in set) + cancellationToken.ThrowIfCancellationRequested(); + if (await (ProcessKeyAsync(key)).ConfigureAwait(false)) { - //TODO: this needn't exclude subclasses... - if (checkForEnd && i == end) - { - //the first id found after the given id - return ids; - } - if (persister.IdentifierType.IsEqual(id, key.Identifier)) + return ids; + } + index++; + } + + // If by the end of the iteration we haven't filled the whole array of ids to fetch, + // we have to check the remaining entity keys. 
+ while (i != batchSize && entityKeys.Count > 0) + { + cancellationToken.ThrowIfCancellationRequested(); + if (await (CheckCacheAndProcessResultAsync()).ConfigureAwait(false)) + { + return ids; + } + } + + return ids; + + // Calls the cache to check if any of the keys is cached and continues the key processing for those + // that are not stored in the cache. + async Task CheckCacheAndProcessResultAsync() + { + var fromIndex = batchableCache != null + ? entityKeys.Count - Math.Min(batchSize, entityKeys.Count) + : 0; + var toIndex = entityKeys.Count - 1; + var indexes = GetSortedKeyIndexes(entityKeys, idIndex.Value, fromIndex, toIndex); + if (batchableCache == null) + { + for (var j = 0; j < entityKeys.Count; j++) { - end = i; + if (await (ProcessKeyAsync(entityKeys[indexes[j]].Key)).ConfigureAwait(false)) + { + return true; + } } - else + } + else + { + var results = await (AreCachedAsync(entityKeys, indexes, persister, batchableCache, checkCache, cancellationToken)).ConfigureAwait(false); + var k = toIndex; + for (var j = 0; j < results.Length; j++) { - if (!await (IsCachedAsync(key, persister, cancellationToken)).ConfigureAwait(false)) + if (!results[j] && await (ProcessKeyAsync(entityKeys[indexes[j]].Key, true)).ConfigureAwait(false)) { - ids[i++] = key.Identifier; + return true; } } - if (i == batchSize) + } + + for (var j = toIndex; j >= fromIndex; j--) + { + entityKeys.RemoveAt(j); + } + return false; + } + + async Task ProcessKeyAsync(EntityKey key, bool ignoreCache = false) + { + //TODO: this needn't exclude subclasses... 
+ if (checkForEnd && (index >= idIndex.Value + batchSize || index == set.Count)) + { + return true; + } + if (persister.IdentifierType.IsEqual(id, key.Identifier)) + { + idIndex = index; + } + else if (!checkCache || batchableCache == null) + { + if (!idIndex.HasValue || index < idIndex.Value) + { + entityKeys.Add(new KeyValuePair(key, index)); + return false; + } + + if (!checkCache || !await (IsCachedAsync(key, persister, cancellationToken)).ConfigureAwait(false)) { - i = 1; //end of array, start filling again from start - if (end != -1) - checkForEnd = true; + ids[i++] = key.Identifier; } } + else if (ignoreCache) + { + ids[i++] = key.Identifier; + } + else + { + entityKeys.Add(new KeyValuePair(key, index)); + // Check the cache only when we have collected as many keys as are needed to fill the batch, + // that are after the demanded key. + if (!idIndex.HasValue || index < idIndex.Value + batchSize) + { + return false; + } + return await (CheckCacheAndProcessResultAsync()).ConfigureAwait(false); + } + if (i == batchSize) + { + i = 1; // End of array, start filling again from start + if (idIndex.HasValue) + { + checkForEnd = true; + return index >= idIndex.Value + batchSize || index == set.Count; + } + } + return false; } - return ids; //we ran out of ids to try } private async Task IsCachedAsync(EntityKey entityKey, IEntityPersister persister, CancellationToken cancellationToken) @@ -167,5 +400,82 @@ private async Task IsCachedAsync(object collectionKey, ICollectionPersiste } return false; } + + /// + /// Checks whether the given entity key indexes are cached. + /// + /// The list of pairs of entity keys and thier indexes. + /// The array of indexes of that have to be checked. + /// The entity persister. + /// The batchable cache. + /// Whether to check the cache or just return for all keys. + /// A cancellation token that can be used to cancel the work + /// An array of booleans that contains the result for each key. 
+ private async Task AreCachedAsync(List> entityKeys, int[] keyIndexes, IEntityPersister persister, + IBatchableReadOnlyCache batchableCache, bool checkCache, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + var result = new bool[keyIndexes.Length]; + if (!checkCache || !persister.HasCache || !context.Session.CacheMode.HasFlag(CacheMode.Get)) + { + return result; + } + var cacheKeys = new object[keyIndexes.Length]; + var i = 0; + foreach (var index in keyIndexes) + { + var entityKey = entityKeys[index].Key; + cacheKeys[i++] = context.Session.GenerateCacheKey( + entityKey.Identifier, + persister.IdentifierType, + entityKey.EntityName); + } + var cacheResult = await (batchableCache.GetManyAsync(cacheKeys, cancellationToken)).ConfigureAwait(false); + for (var j = 0; j < result.Length; j++) + { + result[j] = cacheResult[j] != null; + } + + return result; + } + + /// + /// Checks whether the given collection key indexes are cached. + /// + /// The list of pairs of collection entries and their indexes. + /// The array of indexes of that have to be checked. + /// The collection persister. + /// The batchable cache. + /// Whether to check the cache or just return for all keys. + /// A cancellation token that can be used to cancel the work + /// An array of booleans that contains the result for each key.
+ private async Task AreCachedAsync(List, int>> collectionKeys, + int[] keyIndexes, ICollectionPersister persister, IBatchableReadOnlyCache batchableCache, + bool checkCache, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + var result = new bool[keyIndexes.Length]; + if (!checkCache || !persister.HasCache || !context.Session.CacheMode.HasFlag(CacheMode.Get)) + { + return result; + } + var cacheKeys = new object[keyIndexes.Length]; + var i = 0; + foreach (var index in keyIndexes) + { + var collectionKey = collectionKeys[index].Key; + cacheKeys[i++] = context.Session.GenerateCacheKey( + collectionKey.Key.LoadedKey, + persister.KeyType, + persister.Role); + } + var cacheResult = await (batchableCache.GetManyAsync(cacheKeys, cancellationToken)).ConfigureAwait(false); + for (var j = 0; j < result.Length; j++) + { + result[j] = cacheResult[j] != null; + } + + return result; + } } } diff --git a/src/NHibernate/Async/Engine/Loading/CollectionLoadContext.cs b/src/NHibernate/Async/Engine/Loading/CollectionLoadContext.cs index b13ba5576e4..1b73bc3322f 100644 --- a/src/NHibernate/Async/Engine/Loading/CollectionLoadContext.cs +++ b/src/NHibernate/Async/Engine/Loading/CollectionLoadContext.cs @@ -8,6 +8,7 @@ //------------------------------------------------------------------------------ +using System; using System.Collections; using System.Collections.Generic; using System.Data.Common; @@ -110,10 +111,13 @@ private async Task EndLoadingCollectionsAsync(ICollectionPersister persister, IL log.Debug("{0} collections were found in result set for role: {1}", count, persister.Role); } + var cacheBatcher = new CacheBatcher(LoadContext.PersistenceContext.Session); for (int i = 0; i < count; i++) { - await (EndLoadingCollectionAsync(matchedCollectionEntries[i], persister, cancellationToken)).ConfigureAwait(false); + await (EndLoadingCollectionAsync(matchedCollectionEntries[i], persister, + data => cacheBatcher.AddToBatch(persister, data), 
cancellationToken)).ConfigureAwait(false); } + await (cacheBatcher.ExecuteBatchAsync(cancellationToken)).ConfigureAwait(false); if (log.IsDebugEnabled()) { @@ -121,7 +125,8 @@ private async Task EndLoadingCollectionsAsync(ICollectionPersister persister, IL } } - private async Task EndLoadingCollectionAsync(LoadingCollectionEntry lce, ICollectionPersister persister, CancellationToken cancellationToken) + private async Task EndLoadingCollectionAsync(LoadingCollectionEntry lce, ICollectionPersister persister, + Action cacheBatchingHandler, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); if (log.IsDebugEnabled()) @@ -161,7 +166,7 @@ private async Task EndLoadingCollectionAsync(LoadingCollectionEntry lce, ICollec if (addToCache) { - await (AddCollectionToCacheAsync(lce, persister, cancellationToken)).ConfigureAwait(false); + await (AddCollectionToCacheAsync(lce, persister, cacheBatchingHandler, cancellationToken)).ConfigureAwait(false); } if (log.IsDebugEnabled()) @@ -179,8 +184,10 @@ private async Task EndLoadingCollectionAsync(LoadingCollectionEntry lce, ICollec /// Add the collection to the second-level cache /// The entry representing the collection to add /// The persister + /// The action for handling cache batching /// A cancellation token that can be used to cancel the work - private async Task AddCollectionToCacheAsync(LoadingCollectionEntry lce, ICollectionPersister persister, CancellationToken cancellationToken) + private async Task AddCollectionToCacheAsync(LoadingCollectionEntry lce, ICollectionPersister persister, + Action cacheBatchingHandler, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); ISessionImplementor session = LoadContext.PersistenceContext.Session; @@ -221,13 +228,27 @@ private async Task AddCollectionToCacheAsync(LoadingCollectionEntry lce, ICollec CollectionCacheEntry entry = new CollectionCacheEntry(lce.Collection, persister); CacheKey cacheKey = 
session.GenerateCacheKey(lce.Key, persister.KeyType, persister.Role); - bool put = await (persister.Cache.PutAsync(cacheKey, persister.CacheEntryStructure.Structure(entry), - session.Timestamp, version, versionComparator, - factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh, cancellationToken)).ConfigureAwait(false); - if (put && factory.Statistics.IsStatisticsEnabled) + if (persister.GetBatchSize() > 1 && persister.Cache.IsBatchingPutSupported()) { - factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + cacheBatchingHandler( + new CachePutData( + cacheKey, + persister.CacheEntryStructure.Structure(entry), + version, + versionComparator, + factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh)); + } + else + { + bool put = await (persister.Cache.PutAsync(cacheKey, persister.CacheEntryStructure.Structure(entry), + session.Timestamp, version, versionComparator, + factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh, cancellationToken)).ConfigureAwait(false); + + if (put && factory.Statistics.IsStatisticsEnabled) + { + factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + } } } } diff --git a/src/NHibernate/Async/Engine/TwoPhaseLoad.cs b/src/NHibernate/Async/Engine/TwoPhaseLoad.cs index 6579b270c6f..dda0ea12e0e 100644 --- a/src/NHibernate/Async/Engine/TwoPhaseLoad.cs +++ b/src/NHibernate/Async/Engine/TwoPhaseLoad.cs @@ -19,6 +19,7 @@ using NHibernate.Proxy; using NHibernate.Type; using NHibernate.Properties; +using System; namespace NHibernate.Engine { @@ -33,7 +34,23 @@ public static partial class TwoPhaseLoad /// between the entities which were instantiated and had their state /// "hydrated" into an array /// - public static async Task InitializeEntityAsync(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent, PostLoadEvent postLoadEvent, CancellationToken cancellationToken) + public static Task 
InitializeEntityAsync(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent, PostLoadEvent postLoadEvent, CancellationToken cancellationToken) + { + if (cancellationToken.IsCancellationRequested) + { + return Task.FromCanceled(cancellationToken); + } + return InitializeEntityAsync(entity, readOnly, session, preLoadEvent, postLoadEvent, null, cancellationToken); + } + + /// + /// Perform the second step of 2-phase load. Fully initialize the entity instance. + /// After processing a JDBC result set, we "resolve" all the associations + /// between the entities which were instantiated and had their state + /// "hydrated" into an array + /// + internal static async Task InitializeEntityAsync(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent, PostLoadEvent postLoadEvent, + Action cacheBatchingHandler, CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); //TODO: Should this be an InitializeEntityEventListener??? (watch out for performance!) @@ -95,14 +112,29 @@ public static async Task InitializeEntityAsync(object entity, bool readOnly, ISe CacheEntry entry = new CacheEntry(hydratedState, persister, entityEntry.LoadedWithLazyPropertiesUnfetched, version, session, entity); CacheKey cacheKey = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName); - bool put = - await (persister.Cache.PutAsync(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version, - persister.IsVersioned ? persister.VersionType.Comparator : null, - UseMinimalPuts(session, entityEntry), cancellationToken)).ConfigureAwait(false); - if (put && factory.Statistics.IsStatisticsEnabled) + if (cacheBatchingHandler != null && persister.IsBatchLoadable && persister.Cache.IsBatchingPutSupported()) + { + cacheBatchingHandler( + persister, + new CachePutData( + cacheKey, + persister.CacheEntryStructure.Structure(entry), + version, + persister.IsVersioned ? 
persister.VersionType.Comparator : null, + UseMinimalPuts(session, entityEntry))); + } + else { - factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + bool put = + await (persister.Cache.PutAsync(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version, + persister.IsVersioned ? persister.VersionType.Comparator : null, + UseMinimalPuts(session, entityEntry), cancellationToken)).ConfigureAwait(false); + + if (put && factory.Statistics.IsStatisticsEnabled) + { + factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + } } } diff --git a/src/NHibernate/Async/Event/Default/DefaultInitializeCollectionEventListener.cs b/src/NHibernate/Async/Event/Default/DefaultInitializeCollectionEventListener.cs index 1f0d706bed1..4e71de9ca1d 100644 --- a/src/NHibernate/Async/Event/Default/DefaultInitializeCollectionEventListener.cs +++ b/src/NHibernate/Async/Event/Default/DefaultInitializeCollectionEventListener.cs @@ -9,6 +9,7 @@ using System; +using System.Collections.Generic; using System.Diagnostics; using NHibernate.Cache; @@ -88,48 +89,79 @@ private async Task InitializeCollectionFromCacheAsync(object id, ICollecti { return false; } - else - { - ISessionFactoryImplementor factory = source.Factory; - - CacheKey ck = source.GenerateCacheKey(id, persister.KeyType, persister.Role); - object ce = await (persister.Cache.GetAsync(ck, source.Timestamp, cancellationToken)).ConfigureAwait(false); - if (factory.Statistics.IsStatisticsEnabled) + var batchSize = persister.GetBatchSize(); + if (batchSize > 1 && persister.Cache.IsBatchingGetSupported()) + { + var collectionEntries = new CollectionEntry[batchSize]; + // The first item in the array is the item that we want to load + var collectionBatch = await (source.PersistenceContext.BatchFetchQueue + .GetCollectionBatchAsync(persister, id, batchSize, false, collectionEntries, cancellationToken)).ConfigureAwait(false); + // Ignore null values as the retrieved 
batch may contains them when there are not enough + // uninitialized collection in the queue + var keys = new List(batchSize); + for (var i = 0; i < collectionBatch.Length; i++) { - if (ce == null) + var key = collectionBatch[i]; + if (key == null) { - factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName); - } - else - { - factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName); + break; } + keys.Add(source.GenerateCacheKey(key, persister.KeyType, persister.Role)); } - - if (ce == null) + var cachedObjects = await (persister.Cache.GetManyAsync(keys.ToArray(), source.Timestamp, cancellationToken)).ConfigureAwait(false); + for (var i = 1; i < cachedObjects.Length; i++) { - log.Debug("Collection cache miss: {0}", ck); - } - else - { - log.Debug("Collection cache hit: {0}", ck); + var coll = source.PersistenceContext.BatchFetchQueue.GetBatchLoadableCollection(persister, collectionEntries[i]); + await (AssembleAsync(keys[i], cachedObjects[i], persister, source, coll, collectionBatch[i], false, cancellationToken)).ConfigureAwait(false); } + return await (AssembleAsync(keys[0], cachedObjects[0], persister, source, collection, id, true, cancellationToken)).ConfigureAwait(false); + } + + var cacheKey = source.GenerateCacheKey(id, persister.KeyType, persister.Role); + var cachedObject = await (persister.Cache.GetAsync(cacheKey, source.Timestamp, cancellationToken)).ConfigureAwait(false); + return await (AssembleAsync(cacheKey, cachedObject, persister, source, collection, id, true, cancellationToken)).ConfigureAwait(false); + } + private async Task AssembleAsync(CacheKey ck, object ce, ICollectionPersister persister, ISessionImplementor source, + IPersistentCollection collection, object id, bool alterStatistics, CancellationToken cancellationToken) + { + cancellationToken.ThrowIfCancellationRequested(); + ISessionFactoryImplementor factory = source.Factory; + if (factory.Statistics.IsStatisticsEnabled && alterStatistics) + { if 
(ce == null) { - return false; + factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName); } else { - IPersistenceContext persistenceContext = source.PersistenceContext; + factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName); + } + } - CollectionCacheEntry cacheEntry = (CollectionCacheEntry)persister.CacheEntryStructure.Destructure(ce, factory); - await (cacheEntry.AssembleAsync(collection, persister, persistenceContext.GetCollectionOwner(id, persister), cancellationToken)).ConfigureAwait(false); + if (ce == null) + { + log.Debug("Collection cache miss: {0}", ck); + } + else + { + log.Debug("Collection cache hit: {0}", ck); + } - persistenceContext.GetCollectionEntry(collection).PostInitialize(collection, persistenceContext); - return true; - } + if (ce == null) + { + return false; + } + else + { + IPersistenceContext persistenceContext = source.PersistenceContext; + + CollectionCacheEntry cacheEntry = (CollectionCacheEntry) persister.CacheEntryStructure.Destructure(ce, factory); + await (cacheEntry.AssembleAsync(collection, persister, persistenceContext.GetCollectionOwner(id, persister), cancellationToken)).ConfigureAwait(false); + + persistenceContext.GetCollectionEntry(collection).PostInitialize(collection, persistenceContext); + return true; } } } diff --git a/src/NHibernate/Async/Event/Default/DefaultLoadEventListener.cs b/src/NHibernate/Async/Event/Default/DefaultLoadEventListener.cs index 8247333e98c..24d2752e3cd 100644 --- a/src/NHibernate/Async/Event/Default/DefaultLoadEventListener.cs +++ b/src/NHibernate/Async/Event/Default/DefaultLoadEventListener.cs @@ -9,6 +9,7 @@ using System; +using System.Collections.Generic; using System.Diagnostics; using System.Text; using NHibernate.Cache; @@ -405,14 +406,50 @@ protected virtual async Task LoadFromSecondLevelCacheAsync(LoadEvent @ev bool useCache = persister.HasCache && source.CacheMode .HasFlag(CacheMode.Get) && @event.LockMode.LessThan(LockMode.Read); - if 
(useCache) + if (!useCache) { - ISessionFactoryImplementor factory = source.Factory; - - CacheKey ck = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName); - object ce = await (persister.Cache.GetAsync(ck, source.Timestamp, cancellationToken)).ConfigureAwait(false); + return null; + } + ISessionFactoryImplementor factory = source.Factory; + var batchSize = persister.GetBatchSize(); + if (batchSize > 1 && persister.Cache.IsBatchingGetSupported()) + { + // The first item in the array is the item that we want to load + var entityBatch = + await (source.PersistenceContext.BatchFetchQueue.GetEntityBatchAsync(persister, @event.EntityId, batchSize, false, cancellationToken)).ConfigureAwait(false); + // Ignore null values as the retrieved batch may contains them when there are not enough + // uninitialized entities in the queue + var keys = new List(batchSize); + for (var i = 0; i < entityBatch.Length; i++) + { + var key = entityBatch[i]; + if (key == null) + { + break; + } + keys.Add(source.GenerateCacheKey(key, persister.IdentifierType, persister.RootEntityName)); + } + var cachedObjects = await (persister.Cache.GetManyAsync(keys.ToArray(), source.Timestamp, cancellationToken)).ConfigureAwait(false); + for (var i = 1; i < cachedObjects.Length; i++) + { + cancellationToken.ThrowIfCancellationRequested(); + await (AssembleAsync( + keys[i], + cachedObjects[i], + new LoadEvent(entityBatch[i], @event.EntityClassName, @event.LockMode, @event.Session), + false)).ConfigureAwait(false); + } + cancellationToken.ThrowIfCancellationRequested(); + return await (AssembleAsync(keys[0], cachedObjects[0], @event, true)).ConfigureAwait(false); + } + var cacheKey = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName); + var cachedObject = await (persister.Cache.GetAsync(cacheKey, source.Timestamp, cancellationToken)).ConfigureAwait(false); + cancellationToken.ThrowIfCancellationRequested(); + return await 
(AssembleAsync(cacheKey, cachedObject, @event, true)).ConfigureAwait(false); - if (factory.Statistics.IsStatisticsEnabled) + Task AssembleAsync(CacheKey ck, object ce, LoadEvent evt, bool alterStatistics) + { + if (factory.Statistics.IsStatisticsEnabled && alterStatistics) { if (ce == null) { @@ -434,12 +471,12 @@ protected virtual async Task LoadFromSecondLevelCacheAsync(LoadEvent @ev // NH: Different behavior (take a look to options.ExactPersister (NH-295)) if (!options.ExactPersister || persister.EntityMetamodel.SubclassEntityNames.Contains(entry.Subclass)) { - return await (AssembleCacheEntryAsync(entry, @event.EntityId, persister, @event, cancellationToken)).ConfigureAwait(false); + return AssembleCacheEntryAsync(entry, evt.EntityId, persister, evt, cancellationToken); } } - } - return null; + return Task.FromResult(null); + } } private async Task AssembleCacheEntryAsync(CacheEntry entry, object id, IEntityPersister persister, LoadEvent @event, CancellationToken cancellationToken) diff --git a/src/NHibernate/Async/Loader/Loader.cs b/src/NHibernate/Async/Loader/Loader.cs index c18cbca0a7c..737fac4c89b 100644 --- a/src/NHibernate/Async/Loader/Loader.cs +++ b/src/NHibernate/Async/Loader/Loader.cs @@ -375,10 +375,13 @@ internal async Task InitializeEntitiesAndCollectionsAsync(IList hydratedObjects, Log.Debug("total objects hydrated: {0}", hydratedObjectsSize); } + var cacheBatcher = new CacheBatcher(session); for (int i = 0; i < hydratedObjectsSize; i++) { - await (TwoPhaseLoad.InitializeEntityAsync(hydratedObjects[i], readOnly, session, pre, post, cancellationToken)).ConfigureAwait(false); + await (TwoPhaseLoad.InitializeEntityAsync(hydratedObjects[i], readOnly, session, pre, post, + (persister, data) => cacheBatcher.AddToBatch(persister, data), cancellationToken)).ConfigureAwait(false); } + await (cacheBatcher.ExecuteBatchAsync(cancellationToken)).ConfigureAwait(false); } if (collectionPersisters != null) diff --git a/src/NHibernate/Cache/AbstractCacheBatch.cs 
b/src/NHibernate/Cache/AbstractCacheBatch.cs new file mode 100644 index 00000000000..74f33932670 --- /dev/null +++ b/src/NHibernate/Cache/AbstractCacheBatch.cs @@ -0,0 +1,54 @@ +using System; +using System.Collections.Generic; +using System.Text; +using NHibernate.Engine; + +namespace NHibernate.Cache +{ + /// + /// An abstract batch used for implementing a batch operation of . + /// + internal abstract partial class AbstractCacheBatch + { + public AbstractCacheBatch(ISessionImplementor session, ICacheConcurrencyStrategy cacheConcurrencyStrategy) + { + Session = session; + CacheConcurrencyStrategy = cacheConcurrencyStrategy; + } + + protected ISessionImplementor Session { get; } + + public ICacheConcurrencyStrategy CacheConcurrencyStrategy { get; } + + public abstract int BatchSize { get; } + + public abstract void Execute(); + } + + /// + /// An abstract batch used for implementing a batch operation of . + /// + internal abstract partial class AbstractCacheBatch : AbstractCacheBatch + { + private List _batch = new List(); + + public AbstractCacheBatch(ISessionImplementor session, ICacheConcurrencyStrategy cacheConcurrencyStrategy) + : base(session, cacheConcurrencyStrategy) + { + } + + public void Add(TData data) + { + _batch.Add(data); + } + + public override int BatchSize => _batch.Count; + + public override sealed void Execute() + { + Execute(_batch.ToArray()); + } + + protected abstract void Execute(TData[] data); + } +} diff --git a/src/NHibernate/Cache/CacheBatcher.cs b/src/NHibernate/Cache/CacheBatcher.cs new file mode 100644 index 00000000000..88a6b548636 --- /dev/null +++ b/src/NHibernate/Cache/CacheBatcher.cs @@ -0,0 +1,125 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics; +using System.Text; +using NHibernate.Cache.Access; +using NHibernate.Engine; +using NHibernate.Persister.Collection; +using NHibernate.Persister.Entity; + +namespace NHibernate.Cache +{ + /// + /// A batcher for batching operations of , where the batch 
size is retrived + /// from an or . + /// When a different persister or a different operation is added to the batch, the current batch will be executed. + /// + internal partial class CacheBatcher + { + private CachePutBatch _putBatch; + private ISessionImplementor _session; + private AbstractCacheBatch _currentBatch; + private object _currentPersister; + + protected static readonly INHibernateLogger Log = NHibernateLogger.For(typeof(CacheBatcher)); + + public CacheBatcher(ISessionImplementor session) + { + _session = session; + } + + /// + /// Adds a put operation to the batch. If the batch size reached the persister batch + /// size, the batch will be executed. + /// + /// The entity persister. + /// The data to put in the cache. + public void AddToBatch(IEntityPersister persister, CachePutData data) + { + if (ShouldExecuteBatch(persister, _putBatch)) + { + ExecuteBatch(); + _currentPersister = persister; + _currentBatch = _putBatch = new CachePutBatch(_session, persister.Cache); + } + if (Log.IsDebugEnabled()) + { + Log.Debug("Adding a put operation to batch for entity {0} and key {1}", persister.EntityName, data.Key); + } + _putBatch.Add(data); + } + + /// + /// Adds a put operation to the batch. If the batch size reached the persister batch + /// size, the batch will be executed. + /// + /// The collection persister. + /// The data to put in the cache. + public void AddToBatch(ICollectionPersister persister, CachePutData data) + { + if (ShouldExecuteBatch(persister, _putBatch)) + { + ExecuteBatch(); + _currentPersister = persister; + _currentBatch = _putBatch = new CachePutBatch(_session, persister.Cache); + } + if (Log.IsDebugEnabled()) + { + Log.Debug("Adding a put operation to batch for collection role {0} and key {1}", persister.Role, data.Key); + } + _putBatch.Add(data); + } + + /// + /// Executes the current batch. 
+ /// + public void ExecuteBatch() + { + if (_currentBatch == null || _currentBatch.BatchSize == 0) + { + return; + } + + try + { + Stopwatch duration = null; + if (Log.IsDebugEnabled()) + { + duration = Stopwatch.StartNew(); + } + _currentBatch.Execute(); + if (Log.IsDebugEnabled() && duration != null) + { + Log.Debug("ExecuteBatch for {0} keys took {1} ms", _currentBatch.BatchSize, duration.ElapsedMilliseconds); + } + } + finally + { + Cleanup(); + } + } + + /// + /// Cleans up the current batch. + /// + public void Cleanup() + { + _putBatch = null; + + _currentBatch = null; + _currentPersister = null; + } + + private bool ShouldExecuteBatch(IEntityPersister persister, AbstractCacheBatch batch) + { + return batch != _currentBatch || _currentPersister != persister || + _currentBatch.BatchSize >= persister.GetBatchSize(); + } + + private bool ShouldExecuteBatch(ICollectionPersister persister, AbstractCacheBatch batch) + { + return batch != _currentBatch || _currentPersister != persister || + _currentBatch.BatchSize >= persister.GetBatchSize(); + } + } +} diff --git a/src/NHibernate/Cache/CachePutBatch.cs b/src/NHibernate/Cache/CachePutBatch.cs new file mode 100644 index 00000000000..96457406f06 --- /dev/null +++ b/src/NHibernate/Cache/CachePutBatch.cs @@ -0,0 +1,48 @@ +using System; +using System.Linq; +using System.Collections; +using System.Collections.Generic; +using System.Text; +using NHibernate.Engine; + +namespace NHibernate.Cache +{ + /// + /// A batch for batching the operation. 
+ /// + internal partial class CachePutBatch : AbstractCacheBatch + { + public CachePutBatch(ISessionImplementor session, ICacheConcurrencyStrategy cacheConcurrencyStrategy) : base(session, cacheConcurrencyStrategy) + { + } + + protected override void Execute(CachePutData[] data) + { + var length = data.Length; + var keys = new CacheKey[length]; + var values = new object[length]; + var versions = new object[length]; + var versionComparers = new IComparer[length]; + var minimalPuts = new bool[length]; + + for (int i = 0; i < length; i++) + { + var item = data[i]; + keys[i] = item.Key; + values[i] = item.Value; + versions[i] = item.Version; + versionComparers[i] = item.VersionComparer; + minimalPuts[i] = item.MinimalPut; + } + + var factory = Session.Factory; + var cacheStrategy = CacheConcurrencyStrategy; + var puts = cacheStrategy.PutMany(keys, values, Session.Timestamp, versions, versionComparers, minimalPuts); + + if (factory.Statistics.IsStatisticsEnabled && puts.Any(o => o)) + { + factory.StatisticsImplementor.SecondLevelCachePut(cacheStrategy.RegionName); + } + } + } +} diff --git a/src/NHibernate/Cache/CachePutData.cs b/src/NHibernate/Cache/CachePutData.cs new file mode 100644 index 00000000000..463a3405862 --- /dev/null +++ b/src/NHibernate/Cache/CachePutData.cs @@ -0,0 +1,34 @@ +using System; +using System.Collections; +using System.Collections.Generic; +using System.Text; +using NHibernate.Persister.Collection; +using NHibernate.Persister.Entity; + +namespace NHibernate.Cache +{ + /// + /// The data used to put a value to the 2nd level cache. 
+ /// + internal class CachePutData + { + public CachePutData(CacheKey key, object value, object version, IComparer versionComparer, bool minimalPut) + { + Key = key; + Value = value; + Version = version; + VersionComparer = versionComparer; + MinimalPut = minimalPut; + } + + public CacheKey Key { get; } + + public object Value { get; } + + public object Version { get; } + + public IComparer VersionComparer { get; } + + public bool MinimalPut { get; } + } +} diff --git a/src/NHibernate/Cache/IBatchableCache.cs b/src/NHibernate/Cache/IBatchableCache.cs new file mode 100644 index 00000000000..b85dc83c9cf --- /dev/null +++ b/src/NHibernate/Cache/IBatchableCache.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace NHibernate.Cache +{ + /// + /// Defines methods for retrieving and adding multiple objects from/to the cache at once. + /// The implementor should use this interface along with when the + /// cache supports a multiple get and put operation. + /// + /// + /// + /// + /// All implementations must be threadsafe. + /// + /// + public partial interface IBatchableCache : IBatchableReadOnlyCache + { + /// + /// Add multiple objects to the cache. + /// + /// The keys to cache. + /// The objects to cache. + void PutMany(object[] keys, object[] values); + + /// + /// Lock the objects from being changed by another thread. + /// + /// The keys to lock. + /// The value that was used to lock the keys. + object LockMany(object[] keys); + + /// + /// Unlock the objects that were previously locked. + /// + /// The keys to unlock. + /// The value that was used to lock the keys. 
+ void UnlockMany(object[] keys, object lockValue); + } +} diff --git a/src/NHibernate/Cache/IBatchableCacheConcurrencyStrategy.cs b/src/NHibernate/Cache/IBatchableCacheConcurrencyStrategy.cs new file mode 100644 index 00000000000..56f15f6a9a8 --- /dev/null +++ b/src/NHibernate/Cache/IBatchableCacheConcurrencyStrategy.cs @@ -0,0 +1,47 @@ +using System; +using System.Collections; +using System.Text; +using NHibernate.Cache.Entry; + +namespace NHibernate.Cache +{ + /// + /// Implementors manage transactional access to cached data. + /// + /// + /// + /// Transactions pass in a timestamp indicating transaction start time. + /// + /// + /// When used to cache entities and collections the key is the identifier of the + /// entity/collection and the value should be set to the + /// for an entity and the results of + /// for a collection. + /// + /// + public partial interface IBatchableCacheConcurrencyStrategy : ICacheConcurrencyStrategy + { + /// + /// Attempt to retrieve multiple objects from the Cache + /// + /// The keys (id) of the objects to get out of the Cache. + /// A timestamp prior to the transaction start time + /// An array of cached objects or + /// + object[] GetMany(CacheKey[] keys, long timestamp); + + /// + /// Attempt to cache objects, after loading them from the database. + /// + /// The keys (id) of the objects to put in the Cache. + /// The objects to put in the cache. + /// A timestamp prior to the transaction start time. + /// The version numbers of the objects we are putting. + /// The comparers to be used to compare version numbers + /// Indicates that the cache should avoid a put if the item is already cached. + /// if the objects were successfully cached. 
+ /// + bool[] PutMany(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts); + } +} diff --git a/src/NHibernate/Cache/IBatchableReadOnlyCache.cs b/src/NHibernate/Cache/IBatchableReadOnlyCache.cs new file mode 100644 index 00000000000..7bdbaa07bce --- /dev/null +++ b/src/NHibernate/Cache/IBatchableReadOnlyCache.cs @@ -0,0 +1,27 @@ +using System; +using System.Collections.Generic; +using System.Text; + +namespace NHibernate.Cache +{ + /// + /// Defines a method for retrieving multiple objects from the cache at once. The implementor + /// should use this interface along with when the cache supports + /// a multiple get operation. + /// + /// + /// + /// + /// All implementations must be threadsafe. + /// + /// + public partial interface IBatchableReadOnlyCache + { + /// + /// Get multiple objects from the cache. + /// + /// The keys to be retrieved from the cache. + /// + object[] GetMany(object[] keys); + } +} diff --git a/src/NHibernate/Cache/ICacheConcurrencyStrategy.cs b/src/NHibernate/Cache/ICacheConcurrencyStrategy.cs index 624838f5d38..d22862d128a 100644 --- a/src/NHibernate/Cache/ICacheConcurrencyStrategy.cs +++ b/src/NHibernate/Cache/ICacheConcurrencyStrategy.cs @@ -1,3 +1,4 @@ +using System; using System.Collections; using NHibernate.Cache.Access; using NHibernate.Cache.Entry; @@ -139,4 +140,58 @@ public partial interface ICacheConcurrencyStrategy /// The for this strategy to use. ICache Cache { get; set; } } -} \ No newline at end of file + + internal static partial class CacheConcurrencyStrategyExtensions + { + /// + /// Attempt to retrieve multiple objects from the Cache + /// + /// The cache concurrency strategy. + /// The keys (id) of the objects to get out of the Cache. 
+ /// A timestamp prior to the transaction start time + /// An array of cached objects or + /// + public static object[] GetMany(this ICacheConcurrencyStrategy cache, CacheKey[] keys, long timestamp) + { + if (!(cache is IBatchableCacheConcurrencyStrategy batchableCache)) + { + throw new InvalidOperationException($"Cache concurrency strategy {cache.GetType()} does not support batching"); + } + return batchableCache.GetMany(keys, timestamp); + } + + /// + /// Attempt to cache objects, after loading them from the database. + /// + /// The cache concurrency strategy. + /// The keys (id) of the objects to put in the Cache. + /// The objects to put in the cache. + /// A timestamp prior to the transaction start time. + /// The version numbers of the objects we are putting. + /// The comparers to be used to compare version numbers + /// Indicates that the cache should avoid a put if the item is already cached. + /// if the objects were successfully cached. + /// + public static bool[] PutMany(this ICacheConcurrencyStrategy cache, CacheKey[] keys, object[] values, long timestamp, + object[] versions, IComparer[] versionComparers, bool[] minimalPuts) + { + if (!(cache is IBatchableCacheConcurrencyStrategy batchableCache)) + { + throw new InvalidOperationException($"Cache concurrency strategy {cache.GetType()} does not support batching"); + } + return batchableCache.PutMany(keys, values, timestamp, versions, versionComparers, minimalPuts); + } + + public static bool IsBatchingGetSupported(this ICacheConcurrencyStrategy cache) + { + // ReSharper disable once SuspiciousTypeConversion.Global + return cache.Cache is IBatchableReadOnlyCache && cache is IBatchableCacheConcurrencyStrategy; + } + + public static bool IsBatchingPutSupported(this ICacheConcurrencyStrategy cache) + { + // ReSharper disable once SuspiciousTypeConversion.Global + return cache.Cache is IBatchableCache && cache is IBatchableCacheConcurrencyStrategy; + } + } +} diff --git 
a/src/NHibernate/Cache/NonstrictReadWriteCache.cs b/src/NHibernate/Cache/NonstrictReadWriteCache.cs index f131ac41902..ff7f6ce7ece 100644 --- a/src/NHibernate/Cache/NonstrictReadWriteCache.cs +++ b/src/NHibernate/Cache/NonstrictReadWriteCache.cs @@ -1,5 +1,7 @@ using System; using System.Collections; +using System.Collections.Generic; +using System.Linq; using NHibernate.Cache.Access; namespace NHibernate.Cache @@ -12,9 +14,11 @@ namespace NHibernate.Cache /// This is an "asynchronous" concurrency strategy. /// for a much stricter algorithm /// - public partial class NonstrictReadWriteCache : ICacheConcurrencyStrategy + public partial class NonstrictReadWriteCache : IBatchableCacheConcurrencyStrategy { private ICache cache; + private IBatchableReadOnlyCache _batchableReadOnlyCache; + private IBatchableCache _batchableCache; private static readonly INHibernateLogger log = NHibernateLogger.For(typeof(NonstrictReadWriteCache)); @@ -29,7 +33,13 @@ public string RegionName public ICache Cache { get { return cache; } - set { cache = value; } + set + { + cache = value; + // ReSharper disable once SuspiciousTypeConversion.Global + _batchableReadOnlyCache = value as IBatchableReadOnlyCache; + _batchableCache = value as IBatchableCache; + } } /// @@ -54,6 +64,94 @@ public object Get(CacheKey key, long txTimestamp) return result; } + public object[] GetMany(CacheKey[] keys, long timestamp) + { + if (_batchableReadOnlyCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching get operation"); + } + if (log.IsDebugEnabled()) + { + log.Debug("Cache lookup: {0}", string.Join(",", keys.AsEnumerable())); + } + var results = _batchableReadOnlyCache.GetMany(keys.Select(o => (object) o).ToArray()); + if (!log.IsDebugEnabled()) + { + return results; + } + for (var i = 0; i < keys.Length; i++) + { + log.Debug(results[i] != null ? 
$"Cache hit: {keys[i]}" : $"Cache miss: {keys[i]}"); + } + return results; + } + + /// + /// Add multiple items to the cache + /// + public bool[] PutMany(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts) + { + if (_batchableCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching operations"); + } + var result = new bool[keys.Length]; + if (timestamp == long.MinValue) + { + // MinValue means cache is disabled + return result; + } + + var checkKeys = new List(); + var checkKeyIndexes = new List(); + for (var i = 0; i < minimalPuts.Length; i++) + { + if (minimalPuts[i]) + { + checkKeys.Add(keys[i]); + checkKeyIndexes.Add(i); + } + } + var skipKeyIndexes = new HashSet(); + if (checkKeys.Any()) + { + var objects = _batchableCache.GetMany(checkKeys.ToArray()); + for (var i = 0; i < objects.Length; i++) + { + if (objects[i] != null) + { + if (log.IsDebugEnabled()) + { + log.Debug("item already cached: {0}", checkKeys[i]); + } + skipKeyIndexes.Add(checkKeyIndexes[i]); + } + } + } + + if (skipKeyIndexes.Count == keys.Length) + { + return result; + } + + var putKeys = new object[keys.Length - skipKeyIndexes.Count]; + var putValues = new object[putKeys.Length]; + var j = 0; + for (var i = 0; i < keys.Length; i++) + { + if (skipKeyIndexes.Contains(i)) + { + continue; + } + putKeys[j] = keys[i]; + putValues[j++] = values[i]; + result[i] = true; + } + _batchableCache.PutMany(putKeys, putValues); + return result; + } + /// /// Add an item to the cache /// diff --git a/src/NHibernate/Cache/ReadOnlyCache.cs b/src/NHibernate/Cache/ReadOnlyCache.cs index 5bce536d4b9..e5df5e3eead 100644 --- a/src/NHibernate/Cache/ReadOnlyCache.cs +++ b/src/NHibernate/Cache/ReadOnlyCache.cs @@ -1,5 +1,7 @@ using System; using System.Collections; +using System.Collections.Generic; +using System.Linq; using NHibernate.Cache.Access; namespace NHibernate.Cache @@ -7,9 +9,11 @@ 
namespace NHibernate.Cache /// /// Caches data that is never updated /// - public partial class ReadOnlyCache : ICacheConcurrencyStrategy + public partial class ReadOnlyCache : IBatchableCacheConcurrencyStrategy { private ICache cache; + private IBatchableReadOnlyCache _batchableReadOnlyCache; + private IBatchableCache _batchableCache; private static readonly INHibernateLogger log = NHibernateLogger.For(typeof(ReadOnlyCache)); /// @@ -23,7 +27,13 @@ public string RegionName public ICache Cache { get { return cache; } - set { cache = value; } + set + { + cache = value; + // ReSharper disable once SuspiciousTypeConversion.Global + _batchableReadOnlyCache = value as IBatchableReadOnlyCache; + _batchableCache = value as IBatchableCache; + } } public object Get(CacheKey key, long timestamp) @@ -36,6 +46,28 @@ public object Get(CacheKey key, long timestamp) return result; } + public object[] GetMany(CacheKey[] keys, long timestamp) + { + if (_batchableReadOnlyCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching get operation"); + } + if (log.IsDebugEnabled()) + { + log.Debug("Cache lookup: {0}", string.Join(",", keys.AsEnumerable())); + } + var results = _batchableReadOnlyCache.GetMany(keys.Select(o => (object) o).ToArray()); + if (!log.IsDebugEnabled()) + { + return results; + } + for (var i = 0; i < keys.Length; i++) + { + log.Debug(results[i] != null ? $"Cache hit: {keys[i]}" : $"Cache miss: {keys[i]}"); + } + return results; + } + /// /// Unsupported! 
/// @@ -45,6 +77,69 @@ public ISoftLock Lock(CacheKey key, object version) throw new InvalidOperationException("ReadOnlyCache: Can't write to a readonly object " + key.EntityOrRoleName); } + public bool[] PutMany(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts) + { + if (_batchableCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching operations"); + } + var result = new bool[keys.Length]; + if (timestamp == long.MinValue) + { + // MinValue means cache is disabled + return result; + } + + var checkKeys = new List(); + var checkKeyIndexes = new List(); + for (var i = 0; i < minimalPuts.Length; i++) + { + if (minimalPuts[i]) + { + checkKeys.Add(keys[i]); + checkKeyIndexes.Add(i); + } + } + var skipKeyIndexes = new HashSet(); + if (checkKeys.Any()) + { + var objects = _batchableCache.GetMany(checkKeys.Select(o => (object) o).ToArray()); + for (var i = 0; i < objects.Length; i++) + { + if (objects[i] != null) + { + if (log.IsDebugEnabled()) + { + log.Debug("item already cached: {0}", checkKeys[i]); + } + skipKeyIndexes.Add(checkKeyIndexes[i]); + } + } + } + + if (skipKeyIndexes.Count == keys.Length) + { + return result; + } + + var putKeys = new object[keys.Length - skipKeyIndexes.Count]; + var putValues = new object[putKeys.Length]; + var j = 0; + for (var i = 0; i < keys.Length; i++) + { + if (skipKeyIndexes.Contains(i)) + { + continue; + } + putKeys[j] = keys[i]; + putValues[j++] = values[i]; + result[i] = true; + } + _batchableCache.PutMany(putKeys, putValues); + return result; + } + public bool Put(CacheKey key, object value, long timestamp, object version, IComparer versionComparator, bool minimalPut) { diff --git a/src/NHibernate/Cache/ReadWriteCache.cs b/src/NHibernate/Cache/ReadWriteCache.cs index 98da04caa17..fa66ed9bd16 100644 --- a/src/NHibernate/Cache/ReadWriteCache.cs +++ b/src/NHibernate/Cache/ReadWriteCache.cs @@ -1,5 +1,7 @@ 
using System; using System.Collections; +using System.Collections.Generic; +using System.Linq; using NHibernate.Cache.Access; namespace NHibernate.Cache @@ -20,7 +22,7 @@ namespace NHibernate.Cache /// for a faster algorithm /// /// - public partial class ReadWriteCache : ICacheConcurrencyStrategy + public partial class ReadWriteCache : IBatchableCacheConcurrencyStrategy { public interface ILockable { @@ -34,6 +36,8 @@ public interface ILockable private readonly object _lockObject = new object(); private ICache cache; + private IBatchableReadOnlyCache _batchableReadOnlyCache; + private IBatchableCache _batchableCache; private int _nextLockId; public ReadWriteCache() @@ -51,7 +55,13 @@ public string RegionName public ICache Cache { get { return cache; } - set { cache = value; } + set + { + cache = value; + // ReSharper disable once SuspiciousTypeConversion.Global + _batchableReadOnlyCache = value as IBatchableReadOnlyCache; + _batchableCache = value as IBatchableCache; + } } /// @@ -136,6 +146,45 @@ public object Get(CacheKey key, long txTimestamp) } } + public object[] GetMany(CacheKey[] keys, long timestamp) + { + if (_batchableReadOnlyCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching get operation"); + } + if (log.IsDebugEnabled()) + { + log.Debug("Cache lookup: {0}", string.Join(",", keys.AsEnumerable())); + } + var result = new object[keys.Length]; + lock (_lockObject) + { + var lockables = _batchableReadOnlyCache.GetMany(keys.Select(o => (object) o).ToArray()); + for (var i = 0; i < lockables.Length; i++) + { + var lockable = (ILockable) lockables[i]; + var gettable = lockable != null && lockable.IsGettable(timestamp); + + if (gettable) + { + if (log.IsDebugEnabled()) + { + log.Debug("Cache hit: {0}", keys[i]); + } + result[i] = ((CachedItem) lockable).Value; + } + + if (log.IsDebugEnabled()) + { + log.Debug(lockable == null ? 
"Cache miss: {0}" : "Cached item was locked: {0}", keys[i]); + } + + result[i] = null; + } + } + return result; + } + /// /// Stop any other transactions reading or writing this item to/from /// the cache. Send them straight to the database instead. (The lock @@ -171,6 +220,92 @@ public ISoftLock Lock(CacheKey key, object version) } } + /// + /// Do not add an item to the cache unless the current transaction + /// timestamp is later than the timestamp at which the item was + /// invalidated. (Otherwise, a stale item might be re-added if the + /// database is operating in repeatable read isolation mode.) + /// + /// Whether the items were actually put into the cache + public bool[] PutMany(CacheKey[] keys, object[] values, long timestamp, object[] versions, IComparer[] versionComparers, + bool[] minimalPuts) + { + if (_batchableCache == null) + { + throw new InvalidOperationException($"Cache {cache.GetType()} does not support batching operations"); + } + + var result = new bool[keys.Length]; + if (timestamp == long.MinValue) + { + // MinValue means cache is disabled + return result; + } + + lock (_lockObject) + { + if (log.IsDebugEnabled()) + { + log.Debug("Caching: {0}", string.Join(",", keys.AsEnumerable())); + } + var keysArr = keys.Cast().ToArray(); + var lockAquired = false; + object lockValue = null; + try + { + lockValue = _batchableCache.LockMany(keysArr); + lockAquired = true; + var putBatch = new Dictionary(); + var lockables = _batchableCache.GetMany(keysArr); + for (var i = 0; i < keys.Length; i++) + { + var key = keys[i]; + var version = versions[i]; + var lockable = (ILockable) lockables[i]; + bool puttable = lockable == null || + lockable.IsPuttable(timestamp, version, versionComparers[i]); + if (puttable) + { + putBatch.Add(key, new CachedItem(values[i], cache.NextTimestamp(), version)); + if (log.IsDebugEnabled()) + { + log.Debug("Cached: {0}", key); + } + result[i] = true; + } + else + { + if (log.IsDebugEnabled()) + { + if (lockable.IsLock) + { + 
log.Debug("Item was locked: {0}", key); + } + else + { + log.Debug("Item was already cached: {0}", key); + } + } + result[i] = false; + } + } + + if (putBatch.Count > 0) + { + _batchableCache.PutMany(putBatch.Keys.ToArray(), putBatch.Values.ToArray()); + } + } + finally + { + if (lockAquired) + { + _batchableCache.UnlockMany(keysArr, lockValue); + } + } + } + return result; + } + /// /// Do not add an item to the cache unless the current transaction /// timestamp is later than the timestamp at which the item was diff --git a/src/NHibernate/Cache/UpdateTimestampsCache.cs b/src/NHibernate/Cache/UpdateTimestampsCache.cs index 450ea3ce469..8641bbdab1b 100644 --- a/src/NHibernate/Cache/UpdateTimestampsCache.cs +++ b/src/NHibernate/Cache/UpdateTimestampsCache.cs @@ -18,6 +18,7 @@ public partial class UpdateTimestampsCache { private static readonly INHibernateLogger log = NHibernateLogger.For(typeof(UpdateTimestampsCache)); private ICache updateTimestamps; + private readonly IBatchableReadOnlyCache _batchUpdateTimestamps; private readonly string regionName = typeof(UpdateTimestampsCache).Name; @@ -32,6 +33,8 @@ public UpdateTimestampsCache(Settings settings, IDictionary prop regionName = prefix == null ? regionName : prefix + '.' 
+ regionName; log.Info("starting update timestamps cache at region: {0}", regionName); updateTimestamps = settings.CacheProvider.BuildCache(regionName, props); + // ReSharper disable once SuspiciousTypeConversion.Global + _batchUpdateTimestamps = updateTimestamps as IBatchableReadOnlyCache; } //Since v5.1 @@ -79,35 +82,33 @@ public virtual void Invalidate(IReadOnlyCollection spaces) [MethodImpl(MethodImplOptions.Synchronized)] public virtual bool IsUpToDate(ISet spaces, long timestamp /* H2.1 has Long here */) { - foreach (string space in spaces) + if (_batchUpdateTimestamps != null) { - object lastUpdate = updateTimestamps.Get(space); - if (lastUpdate == null) + var keys = new object[spaces.Count]; + var index = 0; + foreach (var space in spaces) { - //the last update timestamp was lost from the cache - //(or there were no updates since startup!) - - //NOTE: commented out, since users found the "safe" behavior - // counter-intuitive when testing, and we couldn't deal - // with all the forum posts :-( - //updateTimestamps.put( space, new Long( updateTimestamps.nextTimestamp() ) ); - //result = false; // safer - - //OR: put a timestamp there, to avoid subsequent expensive - // lookups to a distributed cache - this is no good, since - // it is non-threadsafe (could hammer effect of an actual - // invalidation), and because this is not the way our - // preferred distributed caches work (they work by - // replication) - //updateTimestamps.put( space, new Long(Long.MIN_VALUE) ); + keys[index++] = space; } - else + var lastUpdates = _batchUpdateTimestamps.GetMany(keys); + foreach (var lastUpdate in lastUpdates) { - if ((long) lastUpdate >= timestamp) + if (IsOutdated(lastUpdate, timestamp)) { return false; } } + return true; + } + + foreach (string space in spaces) + { + object lastUpdate = updateTimestamps.Get(space); + if (IsOutdated(lastUpdate, timestamp)) + { + return false; + } + } return true; } @@ -123,5 +124,37 @@ public virtual void Destroy() log.Warn(e, "could 
not destroy UpdateTimestamps cache"); } } + + private bool IsOutdated(object lastUpdate, long timestamp) + { + if (lastUpdate == null) + { + //the last update timestamp was lost from the cache + //(or there were no updates since startup!) + + //NOTE: commented out, since users found the "safe" behavior + // counter-intuitive when testing, and we couldn't deal + // with all the forum posts :-( + //updateTimestamps.put( space, new Long( updateTimestamps.nextTimestamp() ) ); + //result = false; // safer + + //OR: put a timestamp there, to avoid subsequent expensive + // lookups to a distributed cache - this is no good, since + // it is non-threadsafe (could hammer effect of an actual + // invalidation), and because this is not the way our + // preferred distributed caches work (they work by + // replication) + //updateTimestamps.put( space, new Long(Long.MIN_VALUE) ); + } + else + { + if ((long) lastUpdate >= timestamp) + { + return true; + } + } + + return false; + } } } diff --git a/src/NHibernate/Engine/BatchFetchQueue.cs b/src/NHibernate/Engine/BatchFetchQueue.cs index dd668788379..1c02286959b 100644 --- a/src/NHibernate/Engine/BatchFetchQueue.cs +++ b/src/NHibernate/Engine/BatchFetchQueue.cs @@ -1,3 +1,4 @@ +using System; using System.Collections; using NHibernate.Cache; using NHibernate.Collection; @@ -5,6 +6,7 @@ using NHibernate.Persister.Entity; using NHibernate.Util; using System.Collections.Generic; +using System.Linq; using Iesi.Collections.Generic; namespace NHibernate.Engine @@ -138,6 +140,17 @@ public void RemoveBatchLoadableEntityKey(EntityKey key) set.Remove(key); } } + // A subclass will be added to the batch by the root entity name, when querying by the root entity. + // When removing a subclass key, we need to consider that the subclass may not be batchable but + // its root class may be. 
In order to prevent having in batch entity keys that are already loaded, + // we have to try to remove the key by the root entity, even if the subclass is not batchable. + if (key.RootEntityName != key.EntityName) + { + if (batchLoadableEntityKeys.TryGetValue(key.RootEntityName, out var set)) + { + set.Remove(key); + } + } } /// @@ -158,6 +171,25 @@ public void AddBatchLoadableCollection(IPersistentCollection collection, Collect map[ce] = collection; } + /// + /// Retrives the uninitialized persistent collection from the queue. + /// + /// The collection persister. + /// The collection entry. + /// A persistent collection if found, otherwise. + internal IPersistentCollection GetBatchLoadableCollection(ICollectionPersister persister, CollectionEntry ce) + { + if (!batchLoadableCollections.TryGetValue(persister.Role, out var map)) + { + return null; + } + if (!map.TryGetValue(ce, out var collection)) + { + return null; + } + return collection; + } + /// /// After a collection was initialized or evicted, we don't /// need to batch fetch it anymore, remove it from the queue @@ -181,64 +213,176 @@ public void RemoveBatchLoadableCollection(CollectionEntry ce) /// an array of collection keys, of length batchSize (padded with nulls) public object[] GetCollectionBatch(ICollectionPersister collectionPersister, object id, int batchSize) { - object[] keys = new object[batchSize]; - keys[0] = id; - int i = 1; - int end = -1; - bool checkForEnd = false; + return GetCollectionBatch(collectionPersister, id, batchSize, true, null); + } + + /// + /// Get a batch of uninitialized collection keys for a given role + /// + /// The persister for the collection role. + /// A key that must be included in the batch fetch + /// the maximum number of keys to return + /// Whether to check the cache for uninitialized collection keys. + /// An array that will be filled with collection entries if set. 
+ /// An array of collection keys, of length (padded with nulls) + internal object[] GetCollectionBatch(ICollectionPersister collectionPersister, object id, int batchSize, bool checkCache, + CollectionEntry[] collectionEntries) + { + var keys = new object[batchSize]; + keys[0] = id; // The first element of array is reserved for the actual instance we are loading + var i = 1; // The current index of keys array + int? keyIndex = null; // The index of the demanding key in the linked hash set + var checkForEnd = false; // Stores whether we found the demanded collection and reached the batchSize + var index = 0; // The current index of the linked hash set iteration + // List of collection entries that haven't been checked for their existance in the cache. Besides the collection entry, + // the index where the entry was found is also stored in order to correctly order the returning keys. + var collectionKeys = new List, int>>(batchSize); + var batchableCache = collectionPersister.Cache?.Cache as IBatchableReadOnlyCache; + + if (!batchLoadableCollections.TryGetValue(collectionPersister.Role, out var map)) + { + return keys; + } - if (batchLoadableCollections.TryGetValue(collectionPersister.Role, out var map)) + foreach (KeyValuePair me in map) { - foreach (KeyValuePair me in map) + if (ProcessKey(me)) { - var ce = me.Key; - var collection = me.Value; - if (ce.LoadedKey == null) - { - // the LoadedKey of the CollectionEntry might be null as it might have been reset to null - // (see for example Collections.ProcessDereferencedCollection() - // and CollectionEntry.AfterAction()) - // though we clear the queue on flush, it seems like a good idea to guard - // against potentially null LoadedKey:s - continue; - } + return keys; + } + index++; + } + + // If by the end of the iteration we haven't filled the whole array of keys to fetch, + // we have to check the remaining collection keys. 
+ while (i != batchSize && collectionKeys.Count > 0) + { + if (CheckCacheAndProcessResult()) + { + return keys; + } + } - if (collection.WasInitialized) + return keys; //we ran out of keys to try + + // Calls the cache to check if any of the keys is cached and continues the key processing for those + // that are not stored in the cache. + bool CheckCacheAndProcessResult() + { + var fromIndex = batchableCache != null + ? collectionKeys.Count - Math.Min(batchSize, collectionKeys.Count) + : 0; + var toIndex = collectionKeys.Count - 1; + var indexes = GetSortedKeyIndexes(collectionKeys, keyIndex.Value, fromIndex, toIndex); + if (batchableCache == null) + { + for (var j = 0; j < collectionKeys.Count; j++) { - log.Warn("Encountered initialized collection in BatchFetchQueue, this should not happen."); - continue; + if (ProcessKey(collectionKeys[indexes[j]].Key)) + { + return true; + } } - - if (checkForEnd && i == end) + } + else + { + var results = AreCached(collectionKeys, indexes, collectionPersister, batchableCache, checkCache); + var k = toIndex; + for (var j = 0; j < results.Length; j++) { - return keys; //the first key found after the given key + if (!results[j] && ProcessKey(collectionKeys[indexes[j]].Key, true)) + { + return true; + } } + } + + for (var j = toIndex; j >= fromIndex; j--) + { + collectionKeys.RemoveAt(j); + } + return false; + } + + bool ProcessKey(KeyValuePair me, bool ignoreCache = false) + { + var ce = me.Key; + var collection = me.Value; + if (ce.LoadedKey == null) + { + // the LoadedKey of the CollectionEntry might be null as it might have been reset to null + // (see for example Collections.ProcessDereferencedCollection() + // and CollectionEntry.AfterAction()) + // though we clear the queue on flush, it seems like a good idea to guard + // against potentially null LoadedKey:s + return false; + } - bool isEqual = collectionPersister.KeyType.IsEqual(id, ce.LoadedKey, collectionPersister.Factory); + if (collection.WasInitialized) + { + 
log.Warn("Encountered initialized collection in BatchFetchQueue, this should not happen."); + return false; + } - if (isEqual) + if (checkForEnd && (index >= keyIndex.Value + batchSize || index == map.Count)) + { + return true; + } + if (collectionPersister.KeyType.IsEqual(id, ce.LoadedKey, collectionPersister.Factory)) + { + if (collectionEntries != null) { - end = i; - //checkForEnd = false; + collectionEntries[0] = ce; } - else if (!IsCached(ce.LoadedKey, collectionPersister)) + keyIndex = index; + } + else if (!checkCache || batchableCache == null) + { + if (!keyIndex.HasValue || index < keyIndex.Value) { - keys[i++] = ce.LoadedKey; - //count++; + collectionKeys.Add(new KeyValuePair, int>(me, index)); + return false; } - if (i == batchSize) + if (!checkCache || !IsCached(ce.LoadedKey, collectionPersister)) { - i = 1; //end of array, start filling again from start - if (end != -1) + if (collectionEntries != null) { - checkForEnd = true; + collectionEntries[i] = ce; } + keys[i++] = ce.LoadedKey; + } + } + else if (ignoreCache) + { + if (collectionEntries != null) + { + collectionEntries[i] = ce; + } + keys[i++] = ce.LoadedKey; + } + else + { + collectionKeys.Add(new KeyValuePair, int>(me, index)); + // Check the cache only when we have collected as many keys as are needed to fill the batch, + // that are after the demanded key. 
+ if (!keyIndex.HasValue || index < keyIndex.Value + batchSize) + { + return false; + } + return CheckCacheAndProcessResult(); + } + if (i == batchSize) + { + i = 1; // End of array, start filling again from start + if (keyIndex.HasValue) + { + checkForEnd = true; + return index >= keyIndex.Value + batchSize || index == map.Count; } } + return false; } - - return keys; //we ran out of keys to try } /// @@ -252,42 +396,147 @@ public object[] GetCollectionBatch(ICollectionPersister collectionPersister, obj /// an array of identifiers, of length batchSize (possibly padded with nulls) public object[] GetEntityBatch(IEntityPersister persister, object id, int batchSize) { - object[] ids = new object[batchSize]; - ids[0] = id; //first element of array is reserved for the actual instance we are loading! - int i = 1; - int end = -1; - bool checkForEnd = false; + return GetEntityBatch(persister, id, batchSize, true); + } - if (batchLoadableEntityKeys.TryGetValue(persister.EntityName, out var set)) + /// + /// Get a batch of unloaded identifiers for this class, using a slightly + /// complex algorithm that tries to grab keys registered immediately after + /// the given key. + /// + /// The persister for the entities being loaded. + /// The identifier of the entity currently demanding load. + /// The maximum number of keys to return + /// Whether to check the cache for uninitialized keys. + /// An array of identifiers, of length (possibly padded with nulls) + internal object[] GetEntityBatch(IEntityPersister persister, object id, int batchSize, bool checkCache) + { + var ids = new object[batchSize]; + ids[0] = id; // The first element of array is reserved for the actual instance we are loading + var i = 1; // The current index of ids array + int? 
idIndex = null; // The index of the demanding id in the linked hash set + var checkForEnd = false; // Stores whether we found the demanded id and reached the batchSize + var index = 0; // The current index of the linked hash set iteration + // List of entity keys that haven't been checked for their existance in the cache. Besides the entity key, + // the index where the key was found is also stored in order to correctly order the returning keys. + var entityKeys = new List>(batchSize); + var batchableCache = persister.Cache?.Cache as IBatchableReadOnlyCache; + + if (!batchLoadableEntityKeys.TryGetValue(persister.EntityName, out var set)) + { + return ids; + } + + foreach (var key in set) { - foreach (var key in set) + if (ProcessKey(key)) { - //TODO: this needn't exclude subclasses... - if (checkForEnd && i == end) - { - //the first id found after the given id - return ids; - } - if (persister.IdentifierType.IsEqual(id, key.Identifier)) + return ids; + } + index++; + } + + // If by the end of the iteration we haven't filled the whole array of ids to fetch, + // we have to check the remaining entity keys. + while (i != batchSize && entityKeys.Count > 0) + { + if (CheckCacheAndProcessResult()) + { + return ids; + } + } + + return ids; + + // Calls the cache to check if any of the keys is cached and continues the key processing for those + // that are not stored in the cache. + bool CheckCacheAndProcessResult() + { + var fromIndex = batchableCache != null + ? 
entityKeys.Count - Math.Min(batchSize, entityKeys.Count) + : 0; + var toIndex = entityKeys.Count - 1; + var indexes = GetSortedKeyIndexes(entityKeys, idIndex.Value, fromIndex, toIndex); + if (batchableCache == null) + { + for (var j = 0; j < entityKeys.Count; j++) { - end = i; + if (ProcessKey(entityKeys[indexes[j]].Key)) + { + return true; + } } - else + } + else + { + var results = AreCached(entityKeys, indexes, persister, batchableCache, checkCache); + var k = toIndex; + for (var j = 0; j < results.Length; j++) { - if (!IsCached(key, persister)) + if (!results[j] && ProcessKey(entityKeys[indexes[j]].Key, true)) { - ids[i++] = key.Identifier; + return true; } } - if (i == batchSize) + } + + for (var j = toIndex; j >= fromIndex; j--) + { + entityKeys.RemoveAt(j); + } + return false; + } + + bool ProcessKey(EntityKey key, bool ignoreCache = false) + { + //TODO: this needn't exclude subclasses... + if (checkForEnd && (index >= idIndex.Value + batchSize || index == set.Count)) + { + return true; + } + if (persister.IdentifierType.IsEqual(id, key.Identifier)) + { + idIndex = index; + } + else if (!checkCache || batchableCache == null) + { + if (!idIndex.HasValue || index < idIndex.Value) + { + entityKeys.Add(new KeyValuePair(key, index)); + return false; + } + + if (!checkCache || !IsCached(key, persister)) { - i = 1; //end of array, start filling again from start - if (end != -1) - checkForEnd = true; + ids[i++] = key.Identifier; } } + else if (ignoreCache) + { + ids[i++] = key.Identifier; + } + else + { + entityKeys.Add(new KeyValuePair(key, index)); + // Check the cache only when we have collected as many keys as are needed to fill the batch, + // that are after the demanded key. 
+ if (!idIndex.HasValue || index < idIndex.Value + batchSize) + { + return false; + } + return CheckCacheAndProcessResult(); + } + if (i == batchSize) + { + i = 1; // End of array, start filling again from start + if (idIndex.HasValue) + { + checkForEnd = true; + return index >= idIndex.Value + batchSize || index == set.Count; + } + } + return false; } - return ids; //we ran out of ids to try } private bool IsCached(EntityKey entityKey, IEntityPersister persister) @@ -309,5 +558,111 @@ private bool IsCached(object collectionKey, ICollectionPersister persister) } return false; } + + /// + /// Checks whether the given entity key indexes are cached. + /// + /// The list of pairs of entity keys and thier indexes. + /// The array of indexes of that have to be checked. + /// The entity persister. + /// The batchable cache. + /// Whether to check the cache or just return for all keys. + /// An array of booleans that contains the result for each key. + private bool[] AreCached(List> entityKeys, int[] keyIndexes, IEntityPersister persister, + IBatchableReadOnlyCache batchableCache, bool checkCache) + { + var result = new bool[keyIndexes.Length]; + if (!checkCache || !persister.HasCache || !context.Session.CacheMode.HasFlag(CacheMode.Get)) + { + return result; + } + var cacheKeys = new object[keyIndexes.Length]; + var i = 0; + foreach (var index in keyIndexes) + { + var entityKey = entityKeys[index].Key; + cacheKeys[i++] = context.Session.GenerateCacheKey( + entityKey.Identifier, + persister.IdentifierType, + entityKey.EntityName); + } + var cacheResult = batchableCache.GetMany(cacheKeys); + for (var j = 0; j < result.Length; j++) + { + result[j] = cacheResult[j] != null; + } + + return result; + } + + /// + /// Checks whether the given collection key indexes are cached. + /// + /// The list of pairs of collection entries and thier indexes. + /// The array of indexes of that have to be checked. + /// The collection persister. + /// The batchable cache. 
+ /// Whether to check the cache or just return for all keys. + /// An array of booleans that contains the result for each key. + private bool[] AreCached(List, int>> collectionKeys, + int[] keyIndexes, ICollectionPersister persister, IBatchableReadOnlyCache batchableCache, + bool checkCache) + { + var result = new bool[keyIndexes.Length]; + if (!checkCache || !persister.HasCache || !context.Session.CacheMode.HasFlag(CacheMode.Get)) + { + return result; + } + var cacheKeys = new object[keyIndexes.Length]; + var i = 0; + foreach (var index in keyIndexes) + { + var collectionKey = collectionKeys[index].Key; + cacheKeys[i++] = context.Session.GenerateCacheKey( + collectionKey.Key.LoadedKey, + persister.KeyType, + persister.Role); + } + var cacheResult = batchableCache.GetMany(cacheKeys); + for (var j = 0; j < result.Length; j++) + { + result[j] = cacheResult[j] != null; + } + + return result; + } + + /// + /// Sorts the given keys by thier indexes, where the keys that are after the demanded key will be located + /// at the start and the remaining indexes at the end of the returned array. + /// + /// The type of the key + /// The list of pairs of keys and thier indexes. + /// The index of the demanded key + /// The index where the sorting will begin. + /// The index where the sorting will end. + /// An array of sorted key indexes. 
+ private static int[] GetSortedKeyIndexes(List> keys, int keyIndex, int fromIndex, int toIndex) + { + var result = new int[Math.Abs(toIndex - fromIndex) + 1]; + var lowerIndexes = new List(); + var i = 0; + for (var j = fromIndex; j <= toIndex; j++) + { + if (keys[j].Value < keyIndex) + { + lowerIndexes.Add(j); + } + else + { + result[i++] = j; + } + } + for (var j = lowerIndexes.Count - 1; j >= 0; j--) + { + result[i++] = lowerIndexes[j]; + } + return result; + } } } diff --git a/src/NHibernate/Engine/EntityKey.cs b/src/NHibernate/Engine/EntityKey.cs index 0db22d945cf..041ee9c80dc 100644 --- a/src/NHibernate/Engine/EntityKey.cs +++ b/src/NHibernate/Engine/EntityKey.cs @@ -62,6 +62,11 @@ public string EntityName get { return entityName; } } + internal string RootEntityName + { + get { return rootEntityName; } + } + public override bool Equals(object other) { var otherKey = other as EntityKey; diff --git a/src/NHibernate/Engine/Loading/CollectionLoadContext.cs b/src/NHibernate/Engine/Loading/CollectionLoadContext.cs index 2e7604d4948..89f154af1bd 100644 --- a/src/NHibernate/Engine/Loading/CollectionLoadContext.cs +++ b/src/NHibernate/Engine/Loading/CollectionLoadContext.cs @@ -1,3 +1,4 @@ +using System; using System.Collections; using System.Collections.Generic; using System.Data.Common; @@ -219,10 +220,13 @@ private void EndLoadingCollections(ICollectionPersister persister, IList cacheBatcher.AddToBatch(persister, data)); } + cacheBatcher.ExecuteBatch(); if (log.IsDebugEnabled()) { @@ -230,7 +234,8 @@ private void EndLoadingCollections(ICollectionPersister persister, IList cacheBatchingHandler) { if (log.IsDebugEnabled()) { @@ -269,7 +274,7 @@ private void EndLoadingCollection(LoadingCollectionEntry lce, ICollectionPersist if (addToCache) { - AddCollectionToCache(lce, persister); + AddCollectionToCache(lce, persister, cacheBatchingHandler); } if (log.IsDebugEnabled()) @@ -287,7 +292,9 @@ private void EndLoadingCollection(LoadingCollectionEntry lce, 
ICollectionPersist /// Add the collection to the second-level cache /// The entry representing the collection to add /// The persister - private void AddCollectionToCache(LoadingCollectionEntry lce, ICollectionPersister persister) + /// The action for handling cache batching + private void AddCollectionToCache(LoadingCollectionEntry lce, ICollectionPersister persister, + Action cacheBatchingHandler) { ISessionImplementor session = LoadContext.PersistenceContext.Session; ISessionFactoryImplementor factory = session.Factory; @@ -327,13 +334,27 @@ private void AddCollectionToCache(LoadingCollectionEntry lce, ICollectionPersist CollectionCacheEntry entry = new CollectionCacheEntry(lce.Collection, persister); CacheKey cacheKey = session.GenerateCacheKey(lce.Key, persister.KeyType, persister.Role); - bool put = persister.Cache.Put(cacheKey, persister.CacheEntryStructure.Structure(entry), - session.Timestamp, version, versionComparator, - factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh); - if (put && factory.Statistics.IsStatisticsEnabled) + if (persister.GetBatchSize() > 1 && persister.Cache.IsBatchingPutSupported()) { - factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + cacheBatchingHandler( + new CachePutData( + cacheKey, + persister.CacheEntryStructure.Structure(entry), + version, + versionComparator, + factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh)); + } + else + { + bool put = persister.Cache.Put(cacheKey, persister.CacheEntryStructure.Structure(entry), + session.Timestamp, version, versionComparator, + factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh); + + if (put && factory.Statistics.IsStatisticsEnabled) + { + factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + } } } diff --git a/src/NHibernate/Engine/TwoPhaseLoad.cs b/src/NHibernate/Engine/TwoPhaseLoad.cs index 54786a29d4e..c2df76ce4dc 100644 --- 
a/src/NHibernate/Engine/TwoPhaseLoad.cs +++ b/src/NHibernate/Engine/TwoPhaseLoad.cs @@ -9,6 +9,7 @@ using NHibernate.Proxy; using NHibernate.Type; using NHibernate.Properties; +using System; namespace NHibernate.Engine { @@ -47,6 +48,18 @@ public static void PostHydrate(IEntityPersister persister, object id, object[] v /// "hydrated" into an array /// public static void InitializeEntity(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent, PostLoadEvent postLoadEvent) + { + InitializeEntity(entity, readOnly, session, preLoadEvent, postLoadEvent, null); + } + + /// + /// Perform the second step of 2-phase load. Fully initialize the entity instance. + /// After processing a JDBC result set, we "resolve" all the associations + /// between the entities which were instantiated and had their state + /// "hydrated" into an array + /// + internal static void InitializeEntity(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent, PostLoadEvent postLoadEvent, + Action cacheBatchingHandler) { //TODO: Should this be an InitializeEntityEventListener??? (watch out for performance!) @@ -107,14 +120,29 @@ public static void InitializeEntity(object entity, bool readOnly, ISessionImplem CacheEntry entry = new CacheEntry(hydratedState, persister, entityEntry.LoadedWithLazyPropertiesUnfetched, version, session, entity); CacheKey cacheKey = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName); - bool put = - persister.Cache.Put(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version, - persister.IsVersioned ? 
persister.VersionType.Comparator : null, - UseMinimalPuts(session, entityEntry)); - if (put && factory.Statistics.IsStatisticsEnabled) + if (cacheBatchingHandler != null && persister.IsBatchLoadable && persister.Cache.IsBatchingPutSupported()) + { + cacheBatchingHandler( + persister, + new CachePutData( + cacheKey, + persister.CacheEntryStructure.Structure(entry), + version, + persister.IsVersioned ? persister.VersionType.Comparator : null, + UseMinimalPuts(session, entityEntry))); + } + else { - factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + bool put = + persister.Cache.Put(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version, + persister.IsVersioned ? persister.VersionType.Comparator : null, + UseMinimalPuts(session, entityEntry)); + + if (put && factory.Statistics.IsStatisticsEnabled) + { + factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName); + } } } diff --git a/src/NHibernate/Event/Default/DefaultInitializeCollectionEventListener.cs b/src/NHibernate/Event/Default/DefaultInitializeCollectionEventListener.cs index 2f37f28d1ea..f1bdf215f8b 100644 --- a/src/NHibernate/Event/Default/DefaultInitializeCollectionEventListener.cs +++ b/src/NHibernate/Event/Default/DefaultInitializeCollectionEventListener.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; using System.Diagnostics; using NHibernate.Cache; @@ -76,48 +77,78 @@ private bool InitializeCollectionFromCache(object id, ICollectionPersister persi { return false; } - else - { - ISessionFactoryImplementor factory = source.Factory; - - CacheKey ck = source.GenerateCacheKey(id, persister.KeyType, persister.Role); - object ce = persister.Cache.Get(ck, source.Timestamp); - if (factory.Statistics.IsStatisticsEnabled) + var batchSize = persister.GetBatchSize(); + if (batchSize > 1 && persister.Cache.IsBatchingGetSupported()) + { + var collectionEntries = new CollectionEntry[batchSize]; + // The first item in the 
array is the item that we want to load + var collectionBatch = source.PersistenceContext.BatchFetchQueue + .GetCollectionBatch(persister, id, batchSize, false, collectionEntries); + // Ignore null values as the retrieved batch may contains them when there are not enough + // uninitialized collection in the queue + var keys = new List(batchSize); + for (var i = 0; i < collectionBatch.Length; i++) { - if (ce == null) + var key = collectionBatch[i]; + if (key == null) { - factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName); - } - else - { - factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName); + break; } + keys.Add(source.GenerateCacheKey(key, persister.KeyType, persister.Role)); } - - if (ce == null) + var cachedObjects = persister.Cache.GetMany(keys.ToArray(), source.Timestamp); + for (var i = 1; i < cachedObjects.Length; i++) { - log.Debug("Collection cache miss: {0}", ck); - } - else - { - log.Debug("Collection cache hit: {0}", ck); + var coll = source.PersistenceContext.BatchFetchQueue.GetBatchLoadableCollection(persister, collectionEntries[i]); + Assemble(keys[i], cachedObjects[i], persister, source, coll, collectionBatch[i], false); } + return Assemble(keys[0], cachedObjects[0], persister, source, collection, id, true); + } + + var cacheKey = source.GenerateCacheKey(id, persister.KeyType, persister.Role); + var cachedObject = persister.Cache.Get(cacheKey, source.Timestamp); + return Assemble(cacheKey, cachedObject, persister, source, collection, id, true); + } + private bool Assemble(CacheKey ck, object ce, ICollectionPersister persister, ISessionImplementor source, + IPersistentCollection collection, object id, bool alterStatistics) + { + ISessionFactoryImplementor factory = source.Factory; + if (factory.Statistics.IsStatisticsEnabled && alterStatistics) + { if (ce == null) { - return false; + factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName); } else { - IPersistenceContext 
persistenceContext = source.PersistenceContext; + factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName); + } + } - CollectionCacheEntry cacheEntry = (CollectionCacheEntry)persister.CacheEntryStructure.Destructure(ce, factory); - cacheEntry.Assemble(collection, persister, persistenceContext.GetCollectionOwner(id, persister)); + if (ce == null) + { + log.Debug("Collection cache miss: {0}", ck); + } + else + { + log.Debug("Collection cache hit: {0}", ck); + } - persistenceContext.GetCollectionEntry(collection).PostInitialize(collection, persistenceContext); - return true; - } + if (ce == null) + { + return false; + } + else + { + IPersistenceContext persistenceContext = source.PersistenceContext; + + CollectionCacheEntry cacheEntry = (CollectionCacheEntry) persister.CacheEntryStructure.Destructure(ce, factory); + cacheEntry.Assemble(collection, persister, persistenceContext.GetCollectionOwner(id, persister)); + + persistenceContext.GetCollectionEntry(collection).PostInitialize(collection, persistenceContext); + return true; } } } diff --git a/src/NHibernate/Event/Default/DefaultLoadEventListener.cs b/src/NHibernate/Event/Default/DefaultLoadEventListener.cs index b919263755d..df2a49b4a57 100644 --- a/src/NHibernate/Event/Default/DefaultLoadEventListener.cs +++ b/src/NHibernate/Event/Default/DefaultLoadEventListener.cs @@ -1,4 +1,5 @@ using System; +using System.Collections.Generic; using System.Diagnostics; using System.Text; using NHibernate.Cache; @@ -416,14 +417,47 @@ protected virtual object LoadFromSecondLevelCache(LoadEvent @event, IEntityPersi bool useCache = persister.HasCache && source.CacheMode .HasFlag(CacheMode.Get) && @event.LockMode.LessThan(LockMode.Read); - if (useCache) + if (!useCache) { - ISessionFactoryImplementor factory = source.Factory; - - CacheKey ck = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName); - object ce = persister.Cache.Get(ck, source.Timestamp); + return null; + } + 
ISessionFactoryImplementor factory = source.Factory; + var batchSize = persister.GetBatchSize(); + if (batchSize > 1 && persister.Cache.IsBatchingGetSupported()) + { + // The first item in the array is the item that we want to load + var entityBatch = + source.PersistenceContext.BatchFetchQueue.GetEntityBatch(persister, @event.EntityId, batchSize, false); + // Ignore null values as the retrieved batch may contains them when there are not enough + // uninitialized entities in the queue + var keys = new List(batchSize); + for (var i = 0; i < entityBatch.Length; i++) + { + var key = entityBatch[i]; + if (key == null) + { + break; + } + keys.Add(source.GenerateCacheKey(key, persister.IdentifierType, persister.RootEntityName)); + } + var cachedObjects = persister.Cache.GetMany(keys.ToArray(), source.Timestamp); + for (var i = 1; i < cachedObjects.Length; i++) + { + Assemble( + keys[i], + cachedObjects[i], + new LoadEvent(entityBatch[i], @event.EntityClassName, @event.LockMode, @event.Session), + false); + } + return Assemble(keys[0], cachedObjects[0], @event, true); + } + var cacheKey = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName); + var cachedObject = persister.Cache.Get(cacheKey, source.Timestamp); + return Assemble(cacheKey, cachedObject, @event, true); - if (factory.Statistics.IsStatisticsEnabled) + object Assemble(CacheKey ck, object ce, LoadEvent evt, bool alterStatistics) + { + if (factory.Statistics.IsStatisticsEnabled && alterStatistics) { if (ce == null) { @@ -445,12 +479,12 @@ protected virtual object LoadFromSecondLevelCache(LoadEvent @event, IEntityPersi // NH: Different behavior (take a look to options.ExactPersister (NH-295)) if (!options.ExactPersister || persister.EntityMetamodel.SubclassEntityNames.Contains(entry.Subclass)) { - return AssembleCacheEntry(entry, @event.EntityId, persister, @event); + return AssembleCacheEntry(entry, evt.EntityId, persister, evt); } } - } - return null; + return null; + } } 
private object AssembleCacheEntry(CacheEntry entry, object id, IEntityPersister persister, LoadEvent @event) diff --git a/src/NHibernate/Loader/Loader.cs b/src/NHibernate/Loader/Loader.cs index 598bcb39c59..137e943aaba 100644 --- a/src/NHibernate/Loader/Loader.cs +++ b/src/NHibernate/Loader/Loader.cs @@ -641,10 +641,13 @@ internal void InitializeEntitiesAndCollections(IList hydratedObjects, object res Log.Debug("total objects hydrated: {0}", hydratedObjectsSize); } + var cacheBatcher = new CacheBatcher(session); for (int i = 0; i < hydratedObjectsSize; i++) { - TwoPhaseLoad.InitializeEntity(hydratedObjects[i], readOnly, session, pre, post); + TwoPhaseLoad.InitializeEntity(hydratedObjects[i], readOnly, session, pre, post, + (persister, data) => cacheBatcher.AddToBatch(persister, data)); } + cacheBatcher.ExecuteBatch(); } if (collectionPersisters != null) diff --git a/src/NHibernate/Persister/Entity/AbstractEntityPersister.cs b/src/NHibernate/Persister/Entity/AbstractEntityPersister.cs index df4dd1e4b18..2c8fa2401ee 100644 --- a/src/NHibernate/Persister/Entity/AbstractEntityPersister.cs +++ b/src/NHibernate/Persister/Entity/AbstractEntityPersister.cs @@ -637,6 +637,8 @@ public bool IsBatchLoadable get { return batchSize > 1; } } + public int BatchSize => batchSize; + public virtual string[] IdentifierColumnNames { get { return rootTableKeyColumnNames; } diff --git a/src/NHibernate/Persister/Entity/IEntityPersister.cs b/src/NHibernate/Persister/Entity/IEntityPersister.cs index 88f1c65b380..585222d4be4 100644 --- a/src/NHibernate/Persister/Entity/IEntityPersister.cs +++ b/src/NHibernate/Persister/Entity/IEntityPersister.cs @@ -599,4 +599,25 @@ void Update( IEntityTuplizer EntityTuplizer { get; } } + + internal static class EntityPersisterExtensions + { + /// + /// Get the batch size of a entity persister. + /// + //6.0 TODO: Merge into IEntityPersister. 
+ public static int GetBatchSize(this IEntityPersister persister) + { + if (persister is AbstractEntityPersister acp) + { + return acp.BatchSize; + } + + NHibernateLogger + .For(typeof(EntityPersisterExtensions)) + .Warn("Entity persister of {0} type is not supported, returning 1 as a batch size.", persister?.GetType()); + + return 1; + } + } }