Skip to content
Merged
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions docs/changelog/132845.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 132845
summary: Expose existing DLS cache x-pack usage statistics
area: Authorization
type: enhancement
issues: []
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,8 @@

import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
Expand Down Expand Up @@ -320,7 +322,16 @@ public static List<Setting<?>> getSettings() {

public Map<String, Object> usageStats() {
    // Snapshot of the cache for x-pack usage reporting: entry count, memory
    // footprint (human-readable and raw bytes), and hit/miss/eviction counters.
    final ByteSizeValue memorySize = ByteSizeValue.ofBytes(ramBytesUsed());
    final Cache.Stats hitMissStats = bitsetCache.stats();

    // LinkedHashMap keeps a stable, human-friendly key order in the rendered output.
    final Map<String, Object> usage = new LinkedHashMap<>();
    usage.put("count", entryCount());
    usage.put("memory", memorySize.toString());
    usage.put("memory_in_bytes", memorySize.getBytes());
    usage.put("hits", hitMissStats.getHits());
    usage.put("misses", hitMissStats.getMisses());
    usage.put("evictions", hitMissStats.getEvictions());
    return Collections.unmodifiableMap(usage);
}

private static final class BitsetCacheKey {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@
import java.util.ArrayList;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
Expand All @@ -64,6 +65,7 @@
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
Expand Down Expand Up @@ -396,9 +398,9 @@ public void testCacheUnderConcurrentAccess() throws Exception {
cache.verifyInternalConsistency();

// Due to cache evictions, we must get more bitsets than fields
assertThat(uniqueBitSets.size(), Matchers.greaterThan(FIELD_COUNT));
assertThat(uniqueBitSets.size(), greaterThan(FIELD_COUNT));
// Due to cache evictions, we must have seen more bitsets than the cache currently holds
assertThat(uniqueBitSets.size(), Matchers.greaterThan(cache.entryCount()));
assertThat(uniqueBitSets.size(), greaterThan(cache.entryCount()));
// Even under concurrent pressure, the cache should hit the expected size
assertThat(cache.entryCount(), is(maxCacheCount));
assertThat(cache.ramBytesUsed(), is(maxCacheBytes));
Expand Down Expand Up @@ -517,6 +519,54 @@ public void testEquivalentMatchAllDocsQuery() {
assertFalse(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new TermQuery(new Term("term"))));
}

public void testHitsMissesAndEvictionsStats() throws Exception {
    // Size the cache so it holds one bitset (plus slack) but not two, forcing
    // an eviction on the second distinct query.
    final long cacheSizeBytes = EXPECTED_BYTES_PER_BIT_SET + (EXPECTED_BYTES_PER_BIT_SET / 2);
    final Settings cacheSettings = Settings.builder()
        .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), cacheSizeBytes + "b")
        .build();
    final DocumentSubsetBitsetCache cache = newCache(cacheSettings);

    // A freshly created cache reports all-zero stats.
    final Map<String, Object> expected = new LinkedHashMap<>();
    expected.put("count", 0);
    expected.put("memory", "0b");
    expected.put("memory_in_bytes", 0L);
    expected.put("hits", 0L);
    expected.put("misses", 0L);
    expected.put("evictions", 0L);
    assertThat(cache.usageStats(), equalTo(expected));

    runTestOnIndex((searchExecutionContext, leafContext) -> {
        // First lookup of a query is a miss and populates the cache.
        final Query firstQuery = QueryBuilders.termQuery("field-1", "value-1").toQuery(searchExecutionContext);
        final BitSet firstBitSet = cache.getBitSet(firstQuery, leafContext);
        assertThat(firstBitSet, notNullValue());

        // Repeating the same lookup is a hit and returns the cached instance.
        final BitSet cachedBitSet = cache.getBitSet(firstQuery, leafContext);
        assertThat(cachedBitSet, sameInstance(firstBitSet));

        expected.put("hits", 1L);
        expected.put("misses", 1L);
        expected.put("count", 1);
        expected.put("memory", EXPECTED_BYTES_PER_BIT_SET + "b");
        expected.put("memory_in_bytes", EXPECTED_BYTES_PER_BIT_SET);
        assertThat(cache.usageStats(), equalTo(expected));

        // A second, distinct query misses and should evict the first entry.
        final Query secondQuery = QueryBuilders.termQuery("field-2", "value-2").toQuery(searchExecutionContext);
        final BitSet secondBitSet = cache.getBitSet(secondQuery, leafContext);
        assertThat(secondBitSet, notNullValue());

        // szymon: eviction callback calls `get` on the cache, asynchronously, which updates the stats.
        // so assertion is current state of the code, rather than the expected state.
        // issue: https://github.com/elastic/elasticsearch/issues/132842
        expected.put("misses", 3L);
        expected.put("evictions", 1L);
        assertBusy(() -> assertThat(cache.usageStats(), equalTo(expected)), 200, TimeUnit.MILLISECONDS);
    });
}

private void runTestOnIndex(CheckedBiConsumer<SearchExecutionContext, LeafReaderContext, Exception> body) throws Exception {
runTestOnIndices(1, ctx -> {
final TestIndexContext indexContext = ctx.get(0);
Expand Down Expand Up @@ -638,5 +688,4 @@ private void runTestOnIndices(int numberIndices, CheckedConsumer<List<TestIndexC
private DocumentSubsetBitsetCache newCache(Settings settings) {
    // Test factory: every cache instance is wired to the test's singleThreadExecutor.
    final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings, singleThreadExecutor);
    return cache;
}

}