diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java
index 1177ff68c34c4..18c13860efd6a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java
@@ -55,32 +55,34 @@
 /**
  * This is a cache for {@link BitSet} instances that are used with the {@link DocumentSubsetReader}.
  * It is bounded by memory size and access time.
- *
+ * <p>
  * DLS uses {@link BitSet} instances to track which documents should be visible to the user ("live") and which should not ("dead").
  * This means that there is a bit for each document in a Lucene index (ES shard).
  * Consequently, an index with 10 million document will use more than 1Mb of bitset memory for every unique DLS query, and an index
  * with 1 billion documents will use more than 100Mb of memory per DLS query.
  * Because DLS supports templating queries based on user metadata, there may be many distinct queries in use for each index, even if
  * there is only a single active role.
- *
+ * <p>
  * The primary benefit of the cache is to avoid recalculating the "live docs" (visible documents) when a user performs multiple
  * consecutive queries across one or more large indices. Given the memory examples above, the cache is only useful if it can hold at
  * least 1 large (100Mb or more ) {@code BitSet} during a user's active session, and ideally should be capable of support multiple
  * simultaneous users with distinct DLS queries.
- *
+ * <p>
  * For this reason the default memory usage (weight) for the cache set to 10% of JVM heap ({@link #CACHE_SIZE_SETTING}), so that it
  * automatically scales with the size of the Elasticsearch deployment, and can provide benefit to most use cases without needing
  * customisation. On a 32Gb heap, a 10% cache would be 3.2Gb which is large enough to store BitSets representing 25 billion docs.
- *
+ * <p>
  * However, because queries can be templated by user metadata and that metadata can change frequently, it is common for the
- * effetively lifetime of a single DLS query to be relatively short. We do not want to sacrifice 10% of heap to a cache that is storing
- * BitSets that are not longer needed, so we set the TTL on this cache to be 2 hours ({@link #CACHE_TTL_SETTING}). This time has been
+ * effective lifetime of a single DLS query to be relatively short. We do not want to sacrifice 10% of heap to a cache that is storing
+ * BitSets that are no longer needed, so we set the TTL on this cache to be 2 hours ({@link #CACHE_TTL_SETTING}). This time has been
  * chosen so that it will retain BitSets that are in active use during a user's session, but not be an ongoing drain on memory.
  *
  * @see org.elasticsearch.index.cache.bitset.BitsetFilterCache
  */
 public final class DocumentSubsetBitsetCache implements IndexReader.ClosedListener, Closeable, Accountable {
 
+    private static final Logger logger = LogManager.getLogger(DocumentSubsetBitsetCache.class);
+
     /**
      * The TTL defaults to 2 hours. We default to a large cache size ({@link #CACHE_SIZE_SETTING}), and aggressively
      * expire unused entries so that the cache does not hold on to memory unnecessarily.
@@ -102,8 +104,6 @@ public final class DocumentSubsetBitsetCache implements IndexReader.ClosedListen
 
     private static final BitSet NULL_MARKER = new FixedBitSet(0);
 
-    private static final Logger logger = LogManager.getLogger(DocumentSubsetBitsetCache.class);
-
     /**
      * When a {@link BitSet} is evicted from {@link #bitsetCache}, we need to also remove it from {@link #keysByIndex}.
      * We use a {@link ReentrantReadWriteLock} to control atomicity here - the "read" side represents potential insertions to the
@@ -130,7 +130,8 @@ public DocumentSubsetBitsetCache(Settings settings, ThreadPool threadPool) {
      * @param cleanupExecutor An executor on which the cache cleanup tasks can be run. Due to the way the cache is structured internally,
      *                        it is sometimes necessary to run an asynchronous task to synchronize the internal state.
      */
-    protected DocumentSubsetBitsetCache(Settings settings, ExecutorService cleanupExecutor) {
+    // visible for testing
+    DocumentSubsetBitsetCache(Settings settings, ExecutorService cleanupExecutor) {
         final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
         this.cacheEvictionLock = new ReleasableLock(readWriteLock.writeLock());
         this.cacheModificationLock = new ReleasableLock(readWriteLock.readLock());
@@ -171,7 +172,7 @@ private void onCacheEviction(RemovalNotification notific
         }
         // We push this to a background thread, so that it reduces the risk of blocking searches, but also so that the lock management is
         // simpler - this callback is likely to take place on a thread that is actively adding something to the cache, and is therefore
-        // holding the read ("update") side of the lock. It is not possible to upgrade a read lock to a write ("eviction") lock, but we
+        // holding the read ("update") side of the lock. It is not possible to upgrade a read lock to a write lock ("eviction"), but we
         // need to acquire that lock here.
         cleanupExecutor.submit(() -> {
             try (ReleasableLock ignored = cacheEvictionLock.acquire()) {
@@ -214,7 +215,7 @@ public long ramBytesUsed() {
     /**
      * Obtain the {@link BitSet} for the given {@code query} in the given {@code context}.
      * If there is a cached entry for that query and context, it will be returned.
-     * Otherwise a new BitSet will be created and stored in the cache.
+     * Otherwise, a new BitSet will be created and stored in the cache.
      * The returned BitSet may be null (e.g. if the query has no results).
      */
     @Nullable
@@ -289,7 +290,7 @@ private static BitSet computeBitSet(Query query, LeafReaderContext context) thro
     // Package private for testing
     static boolean isEffectiveMatchAllDocsQuery(Query rewrittenQuery) {
-        if (rewrittenQuery instanceof ConstantScoreQuery && ((ConstantScoreQuery) rewrittenQuery).getQuery() instanceof MatchAllDocsQuery) {
+        if (rewrittenQuery instanceof ConstantScoreQuery csq && csq.getQuery() instanceof MatchAllDocsQuery) {
             return true;
         }
         if (rewrittenQuery instanceof MatchAllDocsQuery) {
             return true;
@@ -322,7 +323,8 @@ public Map usageStats() {
         return Map.of("count", entryCount(), "memory", ram.toString(), "memory_in_bytes", ram.getBytes());
     }
 
-    private static class BitsetCacheKey {
+    private static final class BitsetCacheKey {
+
         final IndexReader.CacheKey index;
         final Query query;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java
index 50089309caddc..d61a2489f657e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetReader.java
@@ -195,8 +195,8 @@ public Bits getLiveDocs() {
         computeNumDocsIfNeeded();
         final Bits actualLiveDocs = in.getLiveDocs();
         if (roleQueryBits == null) {
-            // If we would return a null liveDocs then that would mean that no docs are marked as deleted,
-            // but that isn't the case. No docs match with the role query and therefore all docs are marked as deleted
+            // If we were to return a null liveDocs then that would mean that no docs are marked as deleted,
+            // but that isn't the case. No docs match with the role query and therefore all docs are marked as deleted.
             return new Bits.MatchNoBits(in.maxDoc());
         } else if (roleQueryBits instanceof MatchAllBitSet) {
             return actualLiveDocs;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
index 92bb037888495..bf9e73e4518ac 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/DocumentPermissions.java
@@ -106,7 +106,7 @@ public boolean hasStoredScript() throws IOException {
     /**
      * Creates a {@link BooleanQuery} to be used as filter to restrict access to documents.
-     * Document permission queries are used to create an boolean query.
+     * Document permission queries are used to create a boolean query.
      * If the document permissions are limited, then there is an additional filter added restricting access to documents only allowed by the
      * limited queries.
      *
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
index 5369c95ad6fa7..0645ea8b43b16 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java
@@ -54,6 +54,7 @@
 import java.util.Collections;
 import java.util.IdentityHashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
@@ -62,8 +63,6 @@
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
-import static java.util.Collections.emptyList;
-import static java.util.Collections.emptyMap;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
@@ -525,26 +524,13 @@ private void runTestOnIndex(CheckedBiConsumer
             () -> true,
             null,
-            emptyMap(),
+            Map.of(),
             MapperMetrics.NOOP
         );
@@ -630,7 +616,7 @@ private void runTestOnIndices(int numberIndices, CheckedConsumer
             () -> true,
             null,
-            emptyMap(),
+            Map.of(),
             MapperMetrics.NOOP
         );
         SearchExecutionContext searchExecutionContext = spy(realSearchExecutionContext);
@@ -154,7 +151,7 @@ public void testDLS() throws Exception {
             if (doc % 11 == 0) {
                 iw.deleteDocuments(new Term("id", id));
             } else {
-                if (commitAfter % commitAfter == 0) {
+                if (doc % commitAfter == 0) {
                     iw.commit();
                 }
                 valuesHitCount[valueIndex]++;
@@ -172,7 +169,7 @@ public void testDLS() throws Exception {
             String termQuery = "{\"term\": {\"field\": \"" + values[i] + "\"} }";
             IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(
                 FieldPermissions.DEFAULT,
-                DocumentPermissions.filteredBy(singleton(new BytesArray(termQuery)))
+                DocumentPermissions.filteredBy(Set.of(new BytesArray(termQuery)))
             );
             SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(
                 s -> searchExecutionContext,
@@ -184,7 +181,7 @@ public void testDLS() throws Exception {
 
                 @Override
                 protected IndicesAccessControl getIndicesAccessControl() {
-                    return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
+                    return new IndicesAccessControl(true, Map.of("_index", indexAccessControl));
                 }
             };
@@ -237,9 +234,9 @@ public void testDLSWithLimitedPermissions() throws Exception {
             FieldPermissions.DEFAULT,
             DocumentPermissions.filteredBy(queries)
         );
-        queries = singleton(new BytesArray("{\"terms\" : { \"f1\" : [\"fv11\", \"fv21\", \"fv31\"] } }"));
+        queries = Set.of(new BytesArray("{\"terms\" : { \"f1\" : [\"fv11\", \"fv21\", \"fv31\"] } }"));
         if (restrictiveLimitedIndexPermissions) {
-            queries = singleton(new BytesArray("{\"terms\" : { \"f1\" : [\"fv11\", \"fv31\"] } }"));
+            queries = Set.of(new BytesArray("{\"terms\" : { \"f1\" : [\"fv11\", \"fv31\"] } }"));
         }
         IndicesAccessControl.IndexAccessControl limitedIndexAccessControl = new IndicesAccessControl.IndexAccessControl(
             FieldPermissions.DEFAULT,
@@ -271,7 +268,7 @@ public void testDLSWithLimitedPermissions() throws Exception {
             null,
             () -> true,
             null,
-            emptyMap(),
+            Map.of(),
             MapperMetrics.NOOP
         );
         SearchExecutionContext searchExecutionContext = spy(realSearchExecutionContext);
@@ -289,13 +286,13 @@ public void testDLSWithLimitedPermissions() throws Exception {
 
             @Override
            protected IndicesAccessControl getIndicesAccessControl() {
-                IndicesAccessControl indicesAccessControl = new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
+                IndicesAccessControl indicesAccessControl = new IndicesAccessControl(true, Map.of("_index", indexAccessControl));
                 if (noFilteredIndexPermissions) {
                     return indicesAccessControl;
                 }
                 IndicesAccessControl limitedByIndicesAccessControl = new IndicesAccessControl(
                     true,
-                    singletonMap("_index", limitedIndexAccessControl)
+                    Map.of("_index", limitedIndexAccessControl)
                 );
                 return indicesAccessControl.limitIndicesAccessControl(limitedByIndicesAccessControl);
             }
@@ -492,11 +489,7 @@ public void testDLSWithNestedDocs() throws Exception {
 
            @Override
            protected IndicesAccessControl getIndicesAccessControl() {
-                IndicesAccessControl indicesAccessControl = new IndicesAccessControl(
-                    true,
-                    singletonMap(indexSettings().getIndex().getName(), indexAccessControl)
-                );
-                return indicesAccessControl;
+                return new IndicesAccessControl(true, Map.of(indexSettings().getIndex().getName(), indexAccessControl));
            }
        };
@@ -522,6 +515,6 @@ protected IndicesAccessControl getIndicesAccessControl() {
 
     private static MappingLookup createMappingLookup(List concreteFields) {
         List mappers = concreteFields.stream().map(MockFieldMapper::new).collect(Collectors.toList());
-        return MappingLookup.fromMappers(Mapping.EMPTY, mappers, emptyList());
+        return MappingLookup.fromMappers(Mapping.EMPTY, mappers, List.of());
     }
 }
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java
index 104f6f2847ab0..5ac21a4c126ba 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexReaderWrapperUnitTests.java
@@ -30,12 +30,12 @@
 import org.junit.After;
 import org.junit.Before;
 
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
-import static java.util.Collections.singletonMap;
 import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.sameInstance;
@@ -85,7 +85,7 @@ public void tearDown() throws Exception {
         esIn.close();
     }
 
-    public void testDefaultMetaFields() throws Exception {
+    public void testDefaultMetaFields() {
         securityIndexReaderWrapper = new SecurityIndexReaderWrapper(null, null, securityContext, licenseState, scriptService) {
             @Override
             protected IndicesAccessControl getIndicesAccessControl() {
@@ -93,7 +93,7 @@ protected IndicesAccessControl getIndicesAccessControl() {
                     new FieldPermissions(fieldPermissionDef(new String[] {}, null)),
                     DocumentPermissions.allowAll()
                 );
-                return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
+                return new IndicesAccessControl(true, Map.of("_index", indexAccessControl));
             }
         };
@@ -115,14 +115,14 @@
         assertThat(result.getFilter().run("some_random_regular_field"), is(false));
     }
 
-    public void testWrapReaderWhenFeatureDisabled() throws Exception {
+    public void testWrapReaderWhenFeatureDisabled() {
         when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(false);
         securityIndexReaderWrapper = new SecurityIndexReaderWrapper(null, null, securityContext, licenseState, scriptService);
         DirectoryReader reader = securityIndexReaderWrapper.apply(esIn);
         assertThat(reader, sameInstance(esIn));
     }
 
-    public void testWildcards() throws Exception {
+    public void testWildcards() {
         Set expected = new HashSet<>(META_FIELDS);
         expected.add("field1_a");
         expected.add("field1_b");
@@ -130,7 +130,7 @@ public void testWildcards() throws Exception {
         assertResolved(new FieldPermissions(fieldPermissionDef(new String[] { "field1*" }, null)), expected, "field", "field2");
     }
 
-    public void testDotNotion() throws Exception {
+    public void testDotNotion() {
         Set expected = new HashSet<>(META_FIELDS);
         expected.add("foo.bar");
         assertResolved(new FieldPermissions(fieldPermissionDef(new String[] { "foo.bar" }, null)), expected, "foo", "foo.baz", "bar.foo");
@@ -149,7 +149,7 @@ private void assertResolved(FieldPermissions permissions, Set expected,
         }
     }
 
-    public void testFieldPermissionsWithFieldExceptions() throws Exception {
+    public void testFieldPermissionsWithFieldExceptions() {
         securityIndexReaderWrapper = new SecurityIndexReaderWrapper(null, null, securityContext, licenseState, null);
         String[] grantedFields = new String[] {};
         String[] deniedFields;
@@ -166,7 +166,7 @@ public void testFieldPermissionsWithFieldExceptions() throws Exception {
         deniedFields = META_FIELDS.toArray(new String[0]);
         assertResolved(
             new FieldPermissions(fieldPermissionDef(null, deniedFields)),
-            new HashSet<>(Arrays.asList("foo", "bar", "_some_plugin_meta_field"))
+            new HashSet<>(List.of("foo", "bar", "_some_plugin_meta_field"))
         );
 
         // check we can add all fields with *
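The sizing figures quoted in the DocumentSubsetBitsetCache javadoc above follow from a fixed-width bitset costing one bit per document in the shard. A rough, self-contained sketch of that arithmetic is below; it is illustrative only, not part of this change, and the class and variable names are made up.

// Back-of-the-envelope check of the numbers in the DocumentSubsetBitsetCache javadoc.
// Illustrative sketch only (not part of the change); names are hypothetical.
public class DlsBitsetSizingSketch {
    public static void main(String[] args) {
        // A DLS bitset costs roughly one bit per document in the index (ES shard).
        long tenMillionDocs = 10_000_000L;
        long oneBillionDocs = 1_000_000_000L;
        System.out.println(tenMillionDocs / 8 / 1_000_000 + " MB per DLS query");  // ~1 MB for 10M docs
        System.out.println(oneBillionDocs / 8 / 1_000_000 + " MB per DLS query");  // ~125 MB for 1B docs

        // CACHE_SIZE_SETTING defaults to 10% of the JVM heap: a 32 GB heap gives ~3.2 GB of cache,
        // which at one bit per document covers roughly 25 billion docs.
        long cacheBytes = 32_000_000_000L / 10;
        System.out.println(cacheBytes * 8 / 1_000_000_000L + " billion docs covered");
    }
}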