import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Predicates;
- import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.query.QueryCacheStats;
- import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
- import java.util.HashMap;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;
@@ -70,38 +67,6 @@ public class IndicesQueryCache implements QueryCache, Closeable {
    private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
    private volatile long sharedRamBytesUsed;

-     /**
-      * Calculates a map of {@link ShardId} to {@link Long} containing each shard's share of the {@link IndicesQueryCache} shared ram
-      * size (that is, the sum of all the longs is the size of the indices query cache). Since many shards will not participate in the
-      * cache, shards whose calculated share is zero are not contained in the map at all. As a consequence, the correct pattern for
-      * using the returned map is via {@link Map#getOrDefault(Object, Object)} with a {@code defaultValue} of {@code 0L}.
-      */
-     public static Map<ShardId, Long> getSharedRamSizeForAllShards(IndicesService indicesService) {
-         Map<ShardId, Long> shardIdToSharedRam = new HashMap<>();
-         IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
-         for (IndexService indexService : indicesService) {
-             for (IndexShard indexShard : indexService) {
-                 final var queryCache = indicesService.getIndicesQueryCache();
-                 long sharedRam = (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(indexShard.shardId(), cacheTotals);
-                 // as a size optimization, only store non-zero values in the map
-                 if (sharedRam > 0L) {
-                     shardIdToSharedRam.put(indexShard.shardId(), sharedRam);
-                 }
-             }
-         }
-         return Collections.unmodifiableMap(shardIdToSharedRam);
-     }
-
-     public long getCacheSizeForShard(ShardId shardId) {
-         Stats stats = shardStats.get(shardId);
-         return stats != null ? stats.cacheSize : 0L;
-     }
-
-     public long getSharedRamBytesUsed() {
-         return sharedRamBytesUsed;
-     }
-
    // This is a hack for the fact that the close listener for the
    // ShardCoreKeyMap will be called before onDocIdSetEviction
    // See onDocIdSetEviction for more info
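
Note: the removed getSharedRamSizeForAllShards helper returned a sparse map, and its Javadoc directed callers to read it via Map#getOrDefault. A minimal sketch of that consumption pattern, assuming hypothetical indicesService and shardId variables in scope (an illustration, not a caller from the codebase):

    // Absent keys mean a zero share, hence the 0L default.
    Map<ShardId, Long> sharedRam = IndicesQueryCache.getSharedRamSizeForAllShards(indicesService);
    long shareForShard = sharedRam.getOrDefault(shardId, 0L);
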
@@ -124,58 +89,40 @@ private static QueryCacheStats toQueryCacheStatsSafe(@Nullable Stats stats) {
        return stats == null ? new QueryCacheStats() : stats.toQueryCacheStats();
    }

-     /**
-      * Computes the total number of items in the cache and the total number of shards seen in the cache, across all shards.
-      * @param indicesService the indices service whose local shards are iterated
-      * @return a CacheTotals object containing the computed total number of items in the cache and the number of shards seen in the cache
-      */
-     private static CacheTotals getCacheTotalsForAllShards(IndicesService indicesService) {
-         IndicesQueryCache queryCache = indicesService.getIndicesQueryCache();
-         boolean hasQueryCache = queryCache != null;
+     private long getShareOfAdditionalRamBytesUsed(long itemsInCacheForShard) {
+         if (sharedRamBytesUsed == 0L) {
+             return 0L;
+         }
+
+         /*
+          * We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for
+          * each shard.
+          */
+         // TODO avoid looping over all local shards here - see https://github.com/elastic/elasticsearch/issues/97222
        long totalItemsInCache = 0L;
        int shardCount = 0;
-         for (final IndexService indexService : indicesService) {
-             for (final IndexShard indexShard : indexService) {
-                 final var shardId = indexShard.shardId();
-                 long cacheSize = hasQueryCache ? queryCache.getCacheSizeForShard(shardId) : 0L;
-                 shardCount++;
-                 assert cacheSize >= 0 : "Unexpected cache size of " + cacheSize + " for shard " + shardId;
-                 totalItemsInCache += cacheSize;
+         if (itemsInCacheForShard == 0L) {
+             for (final var stats : shardStats.values()) {
+                 shardCount += 1;
+                 if (stats.cacheSize > 0L) {
+                     // some shard has nonzero cache footprint, so we apportion the shared size by cache footprint, and this shard has none
+                     return 0L;
+                 }
+             }
+         } else {
+             // branchless loop for the common case
+             for (final var stats : shardStats.values()) {
+                 shardCount += 1;
+                 totalItemsInCache += stats.cacheSize;
            }
-         }
-         return new CacheTotals(totalItemsInCache, shardCount);
-     }
-
-     public static long getSharedRamSizeForShard(IndicesService indicesService, ShardId shardId) {
-         IndicesQueryCache.CacheTotals cacheTotals = IndicesQueryCache.getCacheTotalsForAllShards(indicesService);
-         final var queryCache = indicesService.getIndicesQueryCache();
-         return (queryCache == null) ? 0L : queryCache.getSharedRamSizeForShard(shardId, cacheTotals);
-     }
-
-     /**
-      * This method computes the shared RAM size in bytes for the given shard.
-      * @param shardId The shard to compute the shared RAM size for
-      * @param cacheTotals Shard totals computed in getCacheTotalsForAllShards()
-      * @return the shared RAM size in bytes allocated to the given shard, or 0 if unavailable
-      */
-     private long getSharedRamSizeForShard(ShardId shardId, CacheTotals cacheTotals) {
-         long sharedRamBytesUsed = getSharedRamBytesUsed();
-         if (sharedRamBytesUsed == 0L) {
-             return 0L;
        }

-         int shardCount = cacheTotals.shardCount();
        if (shardCount == 0) {
            // Sometimes it's not possible to apportion anything because there are no shard entries at all, which can happen because
            // the shared ram usage can outlive the closing of all shards.
            return 0L;
        }
-         /*
-          * We have some shared ram usage that we try to distribute proportionally to the number of segment-requests in the cache for
-          * each shard.
-          */
-         long totalItemsInCache = cacheTotals.totalItemsInCache();
-         long itemsInCacheForShard = getCacheSizeForShard(shardId);
+
        final long additionalRamBytesUsed;
        if (totalItemsInCache == 0) {
            // all shards have zero cache footprint, so we apportion the size of the shared bytes equally across all shards
@@ -196,12 +143,10 @@ private long getSharedRamSizeForShard(ShardId shardId, CacheTotals cacheTotals)
        return additionalRamBytesUsed;
    }

-     private record CacheTotals(long totalItemsInCache, int shardCount) {}
-
    /** Get usage statistics for the given shard. */
-     public QueryCacheStats getStats(ShardId shard, long precomputedSharedRamBytesUsed) {
+     public QueryCacheStats getStats(ShardId shard) {
        final QueryCacheStats queryCacheStats = toQueryCacheStatsSafe(shardStats.get(shard));
-         queryCacheStats.addRamBytesUsed(precomputedSharedRamBytesUsed);
+         queryCacheStats.addRamBytesUsed(getShareOfAdditionalRamBytesUsed(queryCacheStats.getCacheSize()));
        return queryCacheStats;
    }

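Note: the proportional branch of getShareOfAdditionalRamBytesUsed is elided from this hunk, so the formula below is an assumption consistent with the comments above, with made-up numbers to make the apportionment concrete:

    // Worked sketch (assumed numbers, assumed proportional formula):
    long sharedRamBytesUsed = 900L;  // shared bytes not attributed to any one shard
    long itemsInCacheForShard = 30L; // cache entries held by this shard
    long totalItemsInCache = 90L;    // cache entries across all shards
    int shardCount = 3;
    // Common case (totalItemsInCache > 0): this shard holds a third of the entries,
    // so it is charged a third of the shared bytes.
    long proportionalShare = (long) (sharedRamBytesUsed * (itemsInCacheForShard / (double) totalItemsInCache)); // 300
    // Degenerate case (totalItemsInCache == 0): split equally across the shardCount shards.
    long equalShare = sharedRamBytesUsed / shardCount; // 300
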
@@ -298,7 +243,7 @@ QueryCacheStats toQueryCacheStats() {
        public String toString() {
            return "{shardId="
                + shardId
301- + ", ramBytesUsed ="
246+ + ", ramBytedUsed ="
                + ramBytesUsed
                + ", hitCount="
                + hitCount
@@ -395,7 +340,11 @@ protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
            shardStats.cacheCount += 1;
            shardStats.ramBytesUsed += ramBytesUsed;

-             StatsAndCount statsAndCount = stats2.computeIfAbsent(readerCoreKey, ignored -> new StatsAndCount(shardStats));
+             StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+             if (statsAndCount == null) {
+                 statsAndCount = new StatsAndCount(shardStats);
+                 stats2.put(readerCoreKey, statsAndCount);
+             }
            statsAndCount.count += 1;
        }

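Note: the onDocIdSetCache hunk above swaps computeIfAbsent for an explicit get-then-put. Assuming callers already serialize access to stats2 (any surrounding locking is not visible in this diff), the two shapes are behaviorally equivalent:

    // Equivalent only when no other thread mutates the map between get and put:
    StatsAndCount viaCompute = stats2.computeIfAbsent(readerCoreKey, ignored -> new StatsAndCount(shardStats));

    StatsAndCount viaGetPut = stats2.get(readerCoreKey);
    if (viaGetPut == null) {
        viaGetPut = new StatsAndCount(shardStats);
        stats2.put(readerCoreKey, viaGetPut);
    }
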
@@ -408,7 +357,7 @@ protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sum
            if (numEntries > 0) {
                // We can't use ShardCoreKeyMap here because its core closed
                // listener is called before the listener of the cache which
-                 // triggers this eviction. So instead we use stats2 that
+                 // triggers this eviction. So instead we use stats2 that
                // we only evict when nothing is cached anymore on the segment
                // instead of relying on close listeners
                final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
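
Note: the remainder of the eviction path is not shown in this hunk. A sketch of the bookkeeping the comment describes, with the decrement-and-remove shape assumed rather than copied from the source:

    // Assumed continuation: charge the evicted entries against the segment's
    // bookkeeping, and drop the stats2 mapping once the segment caches nothing.
    statsAndCount.count -= numEntries;
    if (statsAndCount.count == 0) {
        stats2.remove(readerCoreKey);
    }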