
Commit 50ebdd3

rename
1 parent 1c7de75 commit 50ebdd3

File tree: 1 file changed (+5 −5 lines)


x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java

Lines changed: 5 additions & 5 deletions
@@ -213,7 +213,7 @@ public enum PartitioningStrategy implements Writeable {
          */
         SHARD(0) {
             @Override
-            List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int requestedNumSlices) {
+            List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int taskConcurrency) {
                 return List.of(searcher.getLeafContexts().stream().map(PartialLeafReaderContext::new).toList());
             }
         },
@@ -222,7 +222,7 @@ List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int requeste
          */
         SEGMENT(1) {
             @Override
-            List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int requestedNumSlices) {
+            List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int taskConcurrency) {
                 IndexSearcher.LeafSlice[] gs = IndexSearcher.slices(
                     searcher.getLeafContexts(),
                     MAX_DOCS_PER_SLICE,
@@ -237,10 +237,10 @@ List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int requeste
          */
         DOC(2) {
             @Override
-            List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int requestedNumSlices) {
+            List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int taskConcurrency) {
                 final int totalDocCount = searcher.getIndexReader().maxDoc();
                 // Cap the desired slice to prevent CPU underutilization when matching documents are concentrated in one segment region.
-                int desiredSliceSize = Math.clamp(Math.ceilDiv(totalDocCount, requestedNumSlices), 1, MAX_DOCS_PER_SLICE);
+                int desiredSliceSize = Math.clamp(Math.ceilDiv(totalDocCount, taskConcurrency), 1, MAX_DOCS_PER_SLICE);
                 return new AdaptivePartitioner(Math.max(1, desiredSliceSize), MAX_SEGMENTS_PER_SLICE).partition(searcher.getLeafContexts());
             }
         };
@@ -266,7 +266,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeByte(id);
     }

-    abstract List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int requestedNumSlices);
+    abstract List<List<PartialLeafReaderContext>> groups(IndexSearcher searcher, int taskConcurrency);

     private static PartitioningStrategy pick(
         DataPartitioning dataPartitioning,
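
Not part of the commit, but for context on the line touched in the DOC strategy: a minimal, self-contained sketch of how the per-slice document budget is derived from the renamed taskConcurrency parameter. The MAX_DOCS_PER_SLICE value and the class name below are assumed placeholders for illustration, not the constants or types defined in LuceneSliceQueue; Math.clamp and Math.ceilDiv require Java 21 or later.

    // Illustrative sketch only: demonstrates the DOC slice-size formula in isolation.
    // MAX_DOCS_PER_SLICE is an assumed placeholder, not the constant from LuceneSliceQueue.
    class DocSliceSizeExample {
        static final int MAX_DOCS_PER_SLICE = 250_000; // assumed value for the example

        static int desiredSliceSize(int totalDocCount, int taskConcurrency) {
            // Spread docs evenly over the task concurrency, bounded to [1, MAX_DOCS_PER_SLICE]
            // so that a few huge segments still yield enough slices to keep CPUs busy.
            return Math.clamp(Math.ceilDiv(totalDocCount, taskConcurrency), 1, MAX_DOCS_PER_SLICE);
        }

        public static void main(String[] args) {
            System.out.println(desiredSliceSize(1_000_000, 8));  // 125000: fits under the cap
            System.out.println(desiredSliceSize(10_000_000, 8)); // 250000: capped, so more slices than tasks
            System.out.println(desiredSliceSize(5, 8));          // 1: lower bound keeps slices non-empty
        }
    }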

0 commit comments
